Performance results of a Rust vs. Python word-count program

2024-03-20

I wrote a program that counts word occurrences.

Here is the program:

use std::collections::HashMap;
use std::io;
use std::io::prelude::*;

#[derive(Debug)]
struct Entry {
    word: String,
    count: u32,
}

static SEPARATORS: &'static [char] = &[
    ' ', ',', '.', '!', '?', '\'', '"', '\n', '(', ')', '#', '{', '}', '[', ']', '-', ';', ':',
];

fn main() {
    if let Err(err) = try_main() {
        if err.kind() == std::io::ErrorKind::BrokenPipe {
            return;
        }
        // Ignore any error that may occur while writing to stderr.
        let _ = writeln!(std::io::stderr(), "{}", err);
    }
}

fn try_main() -> Result<(), std::io::Error> {
    let mut words: HashMap<String, u32> = HashMap::new();
    let stdin = io::stdin();
    for result in stdin.lock().lines() {
        let line = result?;
        line_processor(line, &mut words)
    }
    output(&mut words)?;
    Ok(())
}

fn line_processor(line: String, words: &mut HashMap<String, u32>) {
    let mut word = String::new();

    for c in line.chars() {
        if SEPARATORS.contains(&c) {
            add_word(word, words);
            word = String::new();
        } else {
            word.push_str(&c.to_string());
        }
    }
    // lines() strips the trailing '\n', so flush the last word of the line.
    add_word(word, words);
}

fn add_word(word: String, words: &mut HashMap<String, u32>) {
    if !word.is_empty() {
        if words.contains_key::<str>(&word) {
            words.insert(word.to_string(), words.get(&word).unwrap() + 1);
        } else {
            words.insert(word.to_string(), 1);
        }
        // println!("word >{}<", word.to_string())
    }
}

fn output(words: &mut HashMap<String, u32>) -> Result<(), std::io::Error> {
    let mut stack = Vec::<Entry>::new();

    for (k, v) in words {
        stack.push(Entry {
            word: k.to_string(),
            count: *v,
        });
    }

    stack.sort_by(|a, b| b.count.cmp(&a.count));
    stack.reverse();

    let stdout = io::stdout();
    let mut stdout = stdout.lock();
    while let Some(entry) = stack.pop() {
        writeln!(stdout, "{}\t{}", entry.count, entry.word)?;
    }
    Ok(())
}

It takes an arbitrary text file as input, counts word occurrences, and produces output like:

15  the
14  in
11  are
10  and
10  of
9   species
9   bats
8   horseshoe
8   is
6   or
6   as
5   which
5   their

I compile it like this:

cargo build --release

And I run it like this:

cat wiki-sample.txt | ./target/release/wordstats  | head -n 50

The wiki-sample.txt file I used is here: https://www.dropbox.com/s/3p3cwhk04va2o8g/wiki-sample.txt?dl=1

I compared the execution time with a Python (3.8) version:

import sys
from collections import defaultdict

# import unidecode

seps = set(
    [
        " ",
        ",",
        ".",
        "!",
        "?",
        "'",
        '"',
        "\n",
        "(",
        ")",
        "#",
        "{",
        "}",
        "[",
        "]",
        "-",
        ";",
        ":",
    ]
)


def out(result):
    for i in result:
        print(f"{i[1]}\t{i[0]}")


if __name__ == "__main__":
    c = defaultdict(int)

    for line in sys.stdin:
        words = line.split(" ")
        for word in words:
            clean_word = []
            for char in word:
                if char not in seps and char:
                    clean_word.append(char)
            r = "".join(clean_word)
            # r = unidecode.unidecode(r)
            if r:
                c[r] += 1

    r = sorted(c.items(), key=lambda x: -x[1])
    try:
        out(r)
    except BrokenPipeError:
        pass

I run it like this:

cat /tmp/t.txt | ./venv/bin/python3 src/main.py | head -n 100
  • The average computing time is: Rust -> 5 s, Python 3.8 -> 19 s (see the measurement note after this list)
  • The Python version is (I think) less optimized (splitting the whole line first costs an extra O(n) pass)
  • It is a single-threaded process, and a very simple program
  • Most of the computing time is spent in the word-processing loop; the output is almost instantaneous
  • I also removed the accent-stripping library code to stay closer to each language's standard library
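
A note on measurement: piping into head only truncates the output after all the counting is done, but for stable, apples-to-apples numbers it can help to time full runs with the output discarded, for example with the hyperfine benchmarking tool (assuming it is installed):

hyperfine --warmup 2 './target/release/wordstats < wiki-sample.txt > /dev/null' \
    './venv/bin/python3 src/main.py < wiki-sample.txt > /dev/null'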

Question: is it normal that Rust is "only" about 3-4x faster?

I also wonder whether I am missing something here, because I find the computing time quite long for "only" 100 MB of data. I don't (naively) believe there is an approach with a lower big-O for this kind of processing, but I may be wrong.

I am used to comparing Python code against equivalent code in Go, Java, or vlang, and those benchmarks usually show speedups of 20x to 100x.

Maybe CPython is good at this kind of processing, or maybe I am missing something in the Rust program (I am very new to Rust) that would make it more efficient.

I am afraid I have missed something important in the test. Any thoughts on this?

Edit: following people's suggestions, I now have the following version:

use std::collections::HashMap;
use std::io;
use std::io::prelude::*;

#[derive(Debug)]
struct Entry<'a> {
    word: &'a str, // word: String,
    count: u32,
}

static SEPARATORS: &'static [char] = &[
    ' ', ',', '.', '!', '?', '\'', '"', '\n', '(', ')', '#', '{', '}', '[', ']', '-', ';', ':',
];

fn main() {
    if let Err(err) = try_main() {
        if err.kind() == std::io::ErrorKind::BrokenPipe {
            return;
        }
        // Ignore any error that may occur while writing to stderr.
        let _ = writeln!(std::io::stderr(), "{}", err);
    }
}

fn try_main() -> Result<(), std::io::Error> {
    let mut words: HashMap<String, u32> = HashMap::new();
    let stdin = io::stdin();
    for result in stdin.lock().lines() {
        let line = result?;
        line_processor(line, &mut words)
    }
    output(&mut words)?;
    Ok(())
}

fn line_processor(line: String, words: &mut HashMap<String, u32>) {
    let mut l = line.as_str();
    loop {
        if let Some(pos) = l.find(|c: char| SEPARATORS.contains(&c)) {
            let (head, tail) = l.split_at(pos);
            add_word(head.to_owned(), words);
            l = &tail[1..];
        } else {
            // lines() strips the trailing '\n', so the remainder is the
            // last word on the line.
            add_word(l.to_owned(), words);
            break;
        }
    }
}

fn add_word(word: String, words: &mut HashMap<String, u32>) {
    if !word.is_empty() {
        let count = words.entry(word).or_insert(0);
        *count += 1;
    }
}

fn output(words: &mut HashMap<String, u32>) -> Result<(), std::io::Error> {
    let mut stack = Vec::<Entry>::new();

    for (k, v) in words {
        stack.push(Entry {
            word: k.as_str(), // word: k.to_string(),
            count: *v,
        });
    }

    stack.sort_by(|a, b| a.count.cmp(&b.count));

    let stdout = io::stdout();
    let mut stdout = stdout.lock();
    while let Some(entry) = stack.pop() {
        writeln!(stdout, "{}\t{}", entry.count, entry.word)?;
    }
    Ok(())
}

It now takes about 2.6 s on my computer. That is much better than the Python version, almost 10x faster, though still not what I expected (not that it is a real problem). There are probably some other optimizations that I have not thought of yet.
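
One optimization in that vein, sketched here as a guess rather than a measured fix: add_word above allocates a fresh String for every word occurrence (the head.to_owned() call), even though most words are already in the map. Looking the word up by &str first and cloning only on a miss avoids almost all of those allocations (the caller would then pass head directly instead of head.to_owned()). The answer below applies the same idea with byte strings.

use std::collections::HashMap;

fn add_word(word: &str, words: &mut HashMap<String, u32>) {
    if word.is_empty() {
        return;
    }
    // The common case is a word that is already in the map, so look it
    // up by &str first and only allocate an owned String on a miss.
    if let Some(count) = words.get_mut(word) {
        *count += 1;
    } else {
        words.insert(word.to_owned(), 1);
    }
}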


You can make this faster by avoiding UTF-8 validation and by using the bstr crate:

use std::io;
use std::io::prelude::*;

use bstr::{BStr, BString, io::BufReadExt, ByteSlice};

type HashMap<K, V> = fnv::FnvHashMap<K, V>;

#[derive(Debug)]
struct Entry<'a> {
    word: &'a BStr,
    count: u32,
}

static SEPSET: &'static [u8] = b" ,.!?'\"\n()#{}[]-;:";

fn main() {
    if let Err(err) = try_main() {
        if err.kind() == std::io::ErrorKind::BrokenPipe {
            return;
        }
        // Ignore any error that may occur while writing to stderr.
        let _ = writeln!(std::io::stderr(), "{}", err);
    }
}

fn try_main() -> Result<(), std::io::Error> {
    let mut words: HashMap<BString, u32> = HashMap::default();
    io::stdin().lock().for_byte_line(|line| {
        line_processor(line, &mut words);
        Ok(true)
    })?;
    output(&mut words)?;
    Ok(())
}

fn line_processor(mut line: &[u8], words: &mut HashMap<BString, u32>) {
    loop {
        if let Some(pos) = line.find_byteset(SEPSET) {
            let (head, tail) = line.split_at(pos);
            add_word(head, words);
            line = &tail[1..];
        } else {
            // for_byte_line strips the line terminator, so whatever
            // remains is the last word on the line.
            add_word(line, words);
            break;
        }
    }
}

fn add_word(word: &[u8], words: &mut HashMap<BString, u32>) {
    if !word.is_empty() {
        // The vast majority of the time we are looking
        // up a word that already exists, so don't bother
        // allocating in the common path. This means the
        // uncommon path does two lookups, but it's so
        // uncommon that the overall result is much faster.
        if let Some(count) = words.get_mut(word.as_bstr()) {
            *count += 1;
        } else {
            words.insert(BString::from(word), 1);
        }
    }
}

fn output(words: &mut HashMap<BString, u32>) -> Result<(), std::io::Error> {
    let mut stack = Vec::<Entry>::new();

    for (k, v) in words {
        stack.push(Entry {
            word: k.as_bstr(),
            count: *v,
        });
    }

    stack.sort_by(|a, b| a.count.cmp(&b.count));

    let stdout = io::stdout();
    let mut stdout = stdout.lock();
    while let Some(entry) = stack.pop() {
        writeln!(stdout, "{}\t{}", entry.count, entry.word)?;
    }
    Ok(())
}
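
For completeness, this version depends on two external crates, so the Cargo.toml would need something along these lines (version numbers are indicative):

[dependencies]
bstr = "0.2"
fnv = "1.0"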

At this point, most of the program's time is spent on hashmap lookups. (That's why I switched to fnv above.) So making this faster probably means using a different strategy for maintaining the map of words. My guess is that most words are only a few bytes long, so you could special-case those and use an array as the map instead of a hashmap (see the sketch after this paragraph). It would probably give a significant speedup, but it would also make your original program quite a bit more complex.
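
A minimal sketch of that array idea, with hypothetical names and not benchmarked here: keep flat count arrays for one- and two-byte words, indexed directly by the bytes themselves, and fall back to the hashmap only for longer words.

use std::collections::HashMap;

struct WordCounts {
    one: Box<[u32; 256]>,        // counts of 1-byte words, indexed by the byte
    two: Box<[u32; 65536]>,      // counts of 2-byte words, indexed by both bytes
    rest: HashMap<Vec<u8>, u32>, // longer words still go through the hashmap
}

impl WordCounts {
    fn new() -> WordCounts {
        WordCounts {
            one: Box::new([0; 256]),
            two: Box::new([0; 65536]),
            rest: HashMap::new(),
        }
    }

    fn add(&mut self, word: &[u8]) {
        match word.len() {
            0 => {}
            // Plain array indexing, with no hashing at all, for the hot
            // short words.
            1 => self.one[word[0] as usize] += 1,
            2 => self.two[((word[0] as usize) << 8) | word[1] as usize] += 1,
            _ => *self.rest.entry(word.to_vec()).or_insert(0) += 1,
        }
    }
}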

As for whether this speed is what one should expect, I'd say, "seems right to me." Your program is doing one unit of work for every word in a 14.5 million word document. The program above runs in about 1.7 seconds on my machine, which means it's processing about 8.3 million words per second, or about 8.3 words every microsecond. Given that each word does a hashmap lookup and requires a search to find the next word, that seems about right.
