Rust vs python program performance results question

I wrote a program that counts words.

Here is the program:

use std::collections::HashMap;
use std::io;
use std::io::prelude::*;

#[derive(Debug)]
struct Entry {
    word: String,
    count: u32,
}

static SEPARATORS: &'static [char] = &[
    ' ', ',', '.', '!', '?', '\'', '"', '\n', '(', ')', '#', '{', '}', '[', ']', '-', ';', ':',
];

fn main() {
    if let Err(err) = try_main() {
        if err.kind() == std::io::ErrorKind::BrokenPipe {
            return;
        }
        // Ignore any error that may occur while writing to stderr.
        let _ = writeln!(std::io::stderr(), "{}", err);
    }
}

fn try_main() -> Result<(), std::io::Error> {
    let mut words: HashMap<String, u32> = HashMap::new();
    let stdin = io::stdin();
    for result in stdin.lock().lines() {
        let line = result?;
        line_processor(line, &mut words)
    }
    output(&mut words)?;
    Ok(())
}

fn line_processor(line: String, words: &mut HashMap<String, u32>) {
    let mut word = String::new();

    for c in line.chars() {
        if SEPARATORS.contains(&c) {
            add_word(word, words);
            word = String::new();
        } else {
            word.push_str(&c.to_string());
        }
    }
}

fn add_word(word: String, words: &mut HashMap<String, u32>) {
    if word.len() > 0 {
        if words.contains_key::<str>(&word) {
            words.insert(word.to_string(), words.get(&word).unwrap() + 1);
        } else {
            words.insert(word.to_string(), 1);
        }
        // println!("word >{}<", word.to_string())
    }
}

fn output(words: &mut HashMap<String, u32>) -> Result<(), std::io::Error> {
    let mut stack = Vec::<Entry>::new();

    for (k, v) in words {
        stack.push(Entry {
            word: k.to_string(),
            count: *v,
        });
    }

    stack.sort_by(|a, b| b.count.cmp(&a.count));
    stack.reverse();

    let stdout = io::stdout();
    let mut stdout = stdout.lock();
    while let Some(entry) = stack.pop() {
        writeln!(stdout, "{}\t{}", entry.count, entry.word)?;
    }
    Ok(())
}

It takes some arbitrary text file as input and counts the words, producing output such as:

15  the
14  in
11  are
10  and
10  of
9   species
9   bats
8   horseshoe
8   is
6   or
6   as
5   which
5   their

I compile it like this:

cargo build --release

I run it like this:

cat wiki-sample.txt | ./target/release/wordstats  | head -n 50

The wiki-sample.txt file I use is here.

I compared the execution time with a Python (3.8) version:

import sys
from collections import defaultdict

# import unidecode

seps = set(
    [
        " ",
        ",",
        ".",
        "!",
        "?",
        "'",
        '"',
        "\n",
        "(",
        ")",
        "#",
        "{",
        "}",
        "[",
        "]",
        "-",
        ";",
        ":",
    ]
)


def out(result):
    for i in result:
        print(f"{i[1]}\t{i[0]}")


if __name__ == "__main__":
    c = defaultdict(int)

    for line in sys.stdin:
        words = line.split(" ")
        for word in words:
            clean_word = []
            for char in word:
                if char not in seps and char:
                    clean_word.append(char)
            r = "".join(clean_word)
            # r = unidecode.unidecode(r)
            if r:
                c[r] += 1

    r = sorted(list(c.items()), key=lambda x: -x[1])
    try:
        out(r)
    except BrokenPipeError as e:
        pass

I run it like this:

cat /tmp/t.txt | ./venv/bin/python3 src/main.py | head -n 100

Question: is it normal to get "only" a ~3-4x performance gain with Rust?

I also wonder whether I'm missing something, because I find the computation time for "only" 100MB of data quite long. Naively, I don't think there's any step here with bad big-O complexity, but I may be wrong.

I'm used to comparing Python code with equivalent code in Go, Java, or V (vlang), and for those benchmarks I often see speedup factors of 20x to 100x.

Maybe CPython is just good at this kind of processing, or maybe I'm missing something in the Rust program (I'm very new to Rust) that would make it more efficient.

I'm afraid I've missed something important in my test. Any thoughts on this?

EDIT: Following everyone's advice, here is my current version:

use std::collections::HashMap;
use std::io;
use std::io::prelude::*;

#[derive(Debug)]
struct Entry<'a> {
    word: &'a str, // word: String,
    count: u32,
}

static SEPARATORS: &'static [char] = &[
    ' ', ',', '.', '!', '?', '\'', '"', '\n', '(', ')', '#', '{', '}', '[', ']', '-', ';', ':',
];

fn main() {
    if let Err(err) = try_main() {
        if err.kind() == std::io::ErrorKind::BrokenPipe {
            return;
        }
        // Ignore any error that may occur while writing to stderr.
        let _ = writeln!(std::io::stderr(), "{}", err);
    }
}

fn try_main() -> Result<(), std::io::Error> {
    let mut words: HashMap<String, u32> = HashMap::new();
    let stdin = io::stdin();
    for result in stdin.lock().lines() {
        let line = result?;
        line_processor(line, &mut words)
    }
    output(&mut words)?;
    Ok(())
}

fn line_processor(line: String, words: &mut HashMap<String, u32>) {
    let mut l = line.as_str();
    loop {
        if let Some(pos) = l.find(|c: char| SEPARATORS.contains(&c)) {
            let (head, tail) = l.split_at(pos);
            add_word(head.to_owned(), words);
            l = &tail[1..];
        } else {
            break;
        }
    }
}

fn add_word(word: String, words: &mut HashMap<String, u32>) {
    if word.len() > 0 {
        let count = words.entry(word).or_insert(0);
        *count += 1;
    }
}

fn output(words: &mut HashMap<String, u32>) -> Result<(), std::io::Error> {
    let mut stack = Vec::<Entry>::new();

    for (k, v) in words {
        stack.push(Entry {
            word: k.as_str(), // word: k.to_string(),
            count: *v,
        });
    }

    stack.sort_by(|a, b| a.count.cmp(&b.count));

    let stdout = io::stdout();
    let mut stdout = stdout.lock();
    while let Some(entry) = stack.pop() {
        writeln!(stdout, "{}\t{}", entry.count, entry.word)?;
    }
    Ok(())
}

It now takes about 2.6' on my computer. That's much better, nearly 10x faster than the Python version, but still not quite what I expected (not that it's a real problem). There may be other optimizations I haven't thought of yet.

In add_word(), you work around the borrow problem with a new copy of the word (.to_string()).

Each counter to be incremented can be accessed with a single lookup:

let count = words.entry(word).or_insert(0);
*count += 1;
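
As an aside of mine (not part of the original answer), this pattern can be written in one line with or_default(), since u32 implements Default and u32::default() is 0:

*words.entry(word).or_default() += 1;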

You can also avoid many string reallocations in line_processor() by working directly on the line as a &str:

let mut l = line.as_str();
loop {
    if let Some(pos) = l.find(|c: char| SEPARATORS.contains(&c)) {
        let (head, tail) = l.split_at(pos);
        add_word(head.to_owned(), words);
        l = &tail[1..];
    } else {
        break;
    }
}
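
As a side note (my own sketch, not part of the original answer), the body of line_processor() can also be written declaratively with str::split, which accepts a closure as its pattern. Unlike the manual loop above, this variant also yields a final word that is not followed by a separator:

// Sketch: split the line on any separator character.
// Also picks up a trailing word at the end of the line.
for word in line.split(|c: char| SEPARATORS.contains(&c)) {
    if !word.is_empty() {
        add_word(word.to_owned(), words);
    }
}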

When it comes to the output() function, a new copy of each string is made to initialize the Entry structs. We can change Entry to

#[derive(Debug)]
struct Entry<'a> {
    word: &'a str,  // word: String,
    count: u32,
}

and then work only with &str references to the original strings (the keys inside words):

stack.push(Entry {
    word: k.as_str(), // word: k.to_string(),
    count: *v,
});

Moreover, the in-place reversal of the sorted vector can be avoided if we invert the sorting criterion:

stack.sort_by(|a, b| a.count.cmp(&b.count));
// stack.reverse();
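
An equivalent alternative (my sketch, not from the original answer) keeps the descending comparison and iterates front to back instead of popping from the end; sort_unstable_by is also usually a bit faster when the relative order of equal counts doesn't matter:

// Sketch: sort descending once, then print in order.
stack.sort_unstable_by(|a, b| b.count.cmp(&a.count));
for entry in &stack {
    writeln!(stdout, "{}\t{}", entry.count, entry.word)?;
}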

I guess these were the main bottlenecks in this example.

On my computer, timing with <wiki-sample.txt >/dev/null shows the following speedups:

original -->  × 1 (reference)
using l.find()+l.split_at() --> × 1.48
using words.entry() --> × 1.25
using both l.find()+l.split_at() and words.entry() --> × 1.73
using all the preceding and &str in Entry and avoiding reverse --> × 2.05

You can speed up the search by avoiding UTF-8 validation, and make the search smarter, by using the bstr crate:

use std::io;
use std::io::prelude::*;

use bstr::{BStr, BString, io::BufReadExt, ByteSlice};

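// FNV hashing is typically faster than the default SipHash for
// short keys like these words.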
type HashMap<K, V> = fnv::FnvHashMap<K, V>;

#[derive(Debug)]
struct Entry<'a> {
    word: &'a BStr,
    count: u32,
}

static SEPSET: &'static [u8] = b" ,.!?'\"\n()#{}[]-;:";

fn main() {
    if let Err(err) = try_main() {
        if err.kind() == std::io::ErrorKind::BrokenPipe {
            return;
        }
        // Ignore any error that may occur while writing to stderr.
        let _ = writeln!(std::io::stderr(), "{}", err);
    }
}

fn try_main() -> Result<(), std::io::Error> {
    let mut words: HashMap<BString, u32> = HashMap::default();
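    // for_byte_line yields each line as raw bytes (&[u8]), avoiding a
    // per-line String allocation and skipping UTF-8 validation.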
    io::stdin().lock().for_byte_line(|line| {
        line_processor(line, &mut words);
        Ok(true)
    })?;
    output(&mut words)?;
    Ok(())
}

fn line_processor(mut line: &[u8], words: &mut HashMap<BString, u32>) {
    loop {
        if let Some(pos) = line.find_byteset(SEPSET) {
            let (head, tail) = line.split_at(pos);
            add_word(head, words);
            line = &tail[1..];
        } else {
            break;
        }
    }
}

fn add_word(word: &[u8], words: &mut HashMap<BString, u32>) {
    if word.len() > 0 {
        // The vast majority of the time we are looking
        // up a word that already exists, so don't bother
        // allocating in the common path. This means the
        // uncommon path does two lookups, but it's so
        // uncommon that the overall result is much faster.
        if let Some(count) = words.get_mut(word.as_bstr()) {
            *count += 1;
        } else {
            words.insert(BString::from(word), 1);
        }
    }
}

fn output(words: &mut HashMap<BString, u32>) -> Result<(), std::io::Error> {
    let mut stack = Vec::<Entry>::new();

    for (k, v) in words {
        stack.push(Entry {
            word: k.as_bstr(),
            count: *v,
        });
    }

    stack.sort_by(|a, b| a.count.cmp(&b.count));

    let stdout = io::stdout();
    let mut stdout = stdout.lock();
    while let Some(entry) = stack.pop() {
        writeln!(stdout, "{}\t{}", entry.count, entry.word)?;
    }
    Ok(())
}

At this point, most of the program's time is spent in hash map lookups. (That's why I switched to fnv above.) So making this faster probably means using a different strategy for maintaining the map of words. My guess is that most words are only a few bytes long, so you could special-case those words to use an array as the map instead of a hash map. It would likely speed things up quite a bit, but would also make your original program more complex.
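
To make that "array as a map" idea concrete, here is a minimal hypothetical sketch (my illustration, not code from the answer): one- and two-byte words are counted in flat arrays indexed directly by their byte values, and only longer words fall through to the hash map:

// Hypothetical sketch; all names are illustrative.
struct ShortCounts {
    one: [u32; 256], // counts for 1-byte words, indexed by the byte
    two: Vec<u32>,   // 256 * 256 counts for 2-byte words
}

impl ShortCounts {
    fn new() -> Self {
        ShortCounts { one: [0; 256], two: vec![0; 256 * 256] }
    }

    // Returns true if the word was short enough to be counted here;
    // otherwise the caller falls back to the hash map.
    fn add(&mut self, word: &[u8]) -> bool {
        match *word {
            [a] => { self.one[a as usize] += 1; true }
            [a, b] => { self.two[a as usize * 256 + b as usize] += 1; true }
            _ => false,
        }
    }
}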

As for whether this speed is expected, I would say, "it seems about right to me." Your program is doing one unit of work for every word in a 14.5 million word document. The program above runs in about 1.7 seconds on my machine, which means it's processing about 8.5 million words per second, or roughly 8.5 words every microsecond. That seems about right, given that every word incurs a hash lookup and a search to find the next word.