Efficiently count word frequencies in Python
I want to count the frequencies of all words in a text file.
>>> countInFile('test.txt')
should return {'aaa': 1, 'bbb': 2, 'ccc': 1} if the target text file is like this:
# test.txt
aaa bbb ccc
bbb
I have implemented it with pure Python, following some posts. However, I found that a pure-Python approach is insufficient because the file is huge (> 1 GB).
I think borrowing the power of sklearn is a candidate.
If I let CountVectorizer count the frequencies of each line, I guess I can get the word frequencies by summing each column. But that sounds a bit indirect.
What is the most efficient and straightforward way to count words in a file with Python?
Update
My (very slow) code is here:
import string
from collections import Counter

def get_term_frequency_in_file(source_file_path):
    wordcount = {}
    with open(source_file_path) as f:
        for line in f:
            line = line.lower().translate(None, string.punctuation)
            this_wordcount = Counter(line.split())
            wordcount = add_merge_two_dict(wordcount, this_wordcount)
    return wordcount

def add_merge_two_dict(x, y):
    return { k: x.get(k, 0) + y.get(k, 0) for k in set(x) | set(y) }
Skip CountVectorizer and scikit-learn.
The file may be too large to load into memory, but I doubt the Python dictionary gets too big. The easiest option for you may be to split the large file into 10-20 smaller files and extend your code to loop over them (a splitting sketch follows the counting function below).
This should be enough.
def countinfile(filename):
    d = {}
    with open(filename, "r") as fin:
        for line in fin:
            words = line.strip().split()
            for word in words:
                try:
                    d[word] += 1
                except KeyError:
                    d[word] = 1
    return d
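If memory does become an issue, here is a minimal sketch of the splitting idea mentioned above; the helper names, the round-robin split, and the part count of 10 are my assumptions rather than part of the original answer, and it reuses countinfile from the block above:
import os

def split_file(filename, n_parts=10):
    # Write the big file out as n_parts smaller files, round-robin by line.
    part_paths = ["{}.part{}".format(filename, i) for i in range(n_parts)]
    parts = [open(p, "w") for p in part_paths]
    with open(filename, "r") as fin:
        for i, line in enumerate(fin):
            parts[i % n_parts].write(line)
    for p in parts:
        p.close()
    return part_paths

def count_in_parts(filename, n_parts=10):
    # Count each part with countinfile and merge the dictionaries.
    total = {}
    for path in split_file(filename, n_parts):
        for word, n in countinfile(path).items():
            total[word] = total.get(word, 0) + n
        os.remove(path)  # clean up the temporary part file
    return total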
The most succinct approach is to use the tools Python gives you.
from future_builtins import map  # Only on Python 2
from collections import Counter
from itertools import chain

def countInFile(filename):
    with open(filename) as f:
        return Counter(chain.from_iterable(map(str.split, f)))
That's it. map(str.split, f) makes a generator that returns a list of words from each line. Wrapping it in chain.from_iterable converts that into a single generator that produces one word at a time. Counter takes an iterable input and counts all the unique values in it. At the end, you return a dict-like object (a Counter) that stores all the unique words and their counts, and during creation you only ever hold one line of data plus the running totals, not the whole file at once.
In theory, on Python 2.7 and 3.1, you might do slightly better by looping over the chained results yourself and counting with a dict or collections.defaultdict(int) (because Counter is implemented in Python, which makes it slower in some cases), but letting Counter do the work is simpler and more self-documenting (I mean, the whole point is counting, so use a Counter). Beyond that, on CPython (the reference interpreter) 3.2 and higher, Counter has a C-level accelerator for counting iterable inputs that will run faster than anything you could write in pure Python.
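For reference, a minimal sketch of the dict/defaultdict variant described above (a hypothetical helper that is not part of the original answer; on Python 2, keep the future_builtins map import so the pipeline stays lazy):
from collections import defaultdict
from itertools import chain

def countInFile_with_defaultdict(filename):
    # Manual counting loop; avoids Counter's pure-Python overhead on old interpreters.
    counts = defaultdict(int)
    with open(filename) as f:
        for word in chain.from_iterable(map(str.split, f)):
            counts[word] += 1
    return counts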
Update: You seem to want punctuation stripped and case-insensitivity, so here is a variant of my earlier code that does that:
from string import punctuation

def countInFile(filename):
    with open(filename) as f:
        linewords = (line.translate(None, punctuation).lower().split() for line in f)
        return Counter(chain.from_iterable(linewords))
Your code runs much more slowly because it creates and destroys many small Counter and set objects instead of calling .update on a single Counter once per line (which, while slightly slower than what I gave in the updated code block, would be at least algorithmically similar in scaling factor).
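For comparison, a minimal sketch of that single-Counter .update pattern applied to the question's function (my adaptation, under the same Python 2 str.translate assumption as the question's code, not code taken from this answer):
from string import punctuation
from collections import Counter

def get_term_frequency_in_file(source_file_path):
    wordcount = Counter()
    with open(source_file_path) as f:
        for line in f:
            # One .update per line instead of building and merging a new Counter each time.
            wordcount.update(line.lower().translate(None, punctuation).split())
    return wordcount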
A memory-efficient and accurate way is to make use of
- CountVectorizer in scikit (for ngram extraction)
- NLTK for word_tokenize
- the numpy matrix sum to collect the counts
- collections.Counter for collecting the counts and the vocabulary
An example:
import urllib.request
from collections import Counter
import numpy as np
from nltk import word_tokenize
from sklearn.feature_extraction.text import CountVectorizer
# Our sample textfile.
url = 'https://raw.githubusercontent.com/Simdiva/DSL-Task/master/data/DSLCC-v2.0/test/test.txt'
response = urllib.request.urlopen(url)
data = response.read().decode('utf8')
# Note that `ngram_range=(1, 1)` means we want to extract Unigrams, i.e. tokens.
ngram_vectorizer = CountVectorizer(analyzer='word', tokenizer=word_tokenize, ngram_range=(1, 1), min_df=1)
# X matrix where the row represents sentences and column is our one-hot vector for each token in our vocabulary
X = ngram_vectorizer.fit_transform(data.split('\n'))
# Vocabulary
vocab = list(ngram_vectorizer.get_feature_names())
# Column-wise sum of the X matrix.
# It's some crazy numpy syntax that looks horribly unpythonic
counts = X.sum(axis=0).A1
freq_distribution = Counter(dict(zip(vocab, counts)))
print (freq_distribution.most_common(10))
[out]:
[(',', 32000),
('.', 17783),
('de', 11225),
('a', 7197),
('que', 5710),
('la', 4732),
('je', 4304),
('se', 4013),
('на', 3978),
('na', 3834)]
Basically, you can also do this:
from collections import Counter
import numpy as np
from nltk import word_tokenize
from sklearn.feature_extraction.text import CountVectorizer
def freq_dist(data):
    """
    :param data: A string with sentences separated by '\n'
    :type data: str
    """
    ngram_vectorizer = CountVectorizer(analyzer='word', tokenizer=word_tokenize, ngram_range=(1, 1), min_df=1)
    X = ngram_vectorizer.fit_transform(data.split('\n'))
    vocab = list(ngram_vectorizer.get_feature_names())
    counts = X.sum(axis=0).A1
    return Counter(dict(zip(vocab, counts)))
Let's time it:
import time
start = time.time()
word_distribution = freq_dist(data)
print (time.time() - start)
[out]:
5.257147789001465
Note that CountVectorizer can also take a file object instead of a string, so there is no need to read the whole file into memory here. In code:
import io
from collections import Counter
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
infile = '/path/to/input.txt'
ngram_vectorizer = CountVectorizer(analyzer='word', ngram_range=(1, 1), min_df=1)
with io.open(infile, 'r', encoding='utf8') as fin:
    X = ngram_vectorizer.fit_transform(fin)
vocab = ngram_vectorizer.get_feature_names()
counts = X.sum(axis=0).A1
freq_distribution = Counter(dict(zip(vocab, counts)))
print (freq_distribution.most_common(10))
Instead of decoding the whole bytes read from the url, I process the binary data. Because bytes.translate expects its second argument to be a byte string, I utf-8 encode punctuation. After removing the punctuation, I utf-8 decode the byte string.
The function freq_dist expects an iterable; that is why I pass data.splitlines().
from urllib2 import urlopen
from collections import Counter
from string import punctuation
from time import time
import sys
from pprint import pprint
url = 'https://raw.githubusercontent.com/Simdiva/DSL-Task/master/data/DSLCC-v2.0/test/test.txt'
data = urlopen(url).read()
def freq_dist(data):
    """
    :param data: file-like object opened in binary mode or
                 sequence of byte strings separated by '\n'
    :type data: an iterable sequence
    """
    # For readability
    # return Counter(word for line in data
    #                for word in line.translate(
    #                    None, bytes(punctuation.encode('utf-8'))).decode('utf-8').split())
    punc = punctuation.encode('utf-8')
    words = (word for line in data for word in line.translate(None, punc).decode('utf-8').split())
    return Counter(words)
start = time()
word_dist = freq_dist(data.splitlines())
print('elapsed: {}'.format(time() - start))
pprint(word_dist.most_common(10))
Output:
elapsed: 0.806480884552
[(u'de', 11106),
(u'a', 6742),
(u'que', 5701),
(u'la', 4319),
(u'je', 4260),
(u'se', 3938),
(u'\u043d\u0430', 3929),
(u'na', 3623),
(u'da', 3534),
(u'i', 3487)]
It seems a plain dict is more efficient than a Counter object.
def freq_dist(data):
    """
    :param data: file-like object opened in binary mode or
                 sequence of byte strings separated by '\n'
    :type data: an iterable sequence
    """
    d = {}
    punc = punctuation.encode('utf-8')
    words = (word for line in data for word in line.translate(None, punc).decode('utf-8').split())
    for word in words:
        d[word] = d.get(word, 0) + 1
    return d
start = time()
word_dist = freq_dist(data.splitlines())
print('elapsed: {}'.format(time() - start))
pprint(sorted(word_dist.items(), key=lambda x: (x[1], x[0]), reverse=True)[:10])
Output:
elapsed: 0.642680168152
[(u'de', 11106),
(u'a', 6742),
(u'que', 5701),
(u'la', 4319),
(u'je', 4260),
(u'se', 3938),
(u'\u043d\u0430', 3929),
(u'na', 3623),
(u'da', 3534),
(u'i', 3487)]
For memory efficiency when opening the big file, you have to pass just the opened url, but then the timing will also include the file download time.
data = urlopen(url)
word_dist = freq_dist(data)
Here are some benchmarks. It will look strange, but the crudest code wins.
[code]:
from collections import Counter, defaultdict
import io, time
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer

infile = '/path/to/file'

def extract_dictionary_sklearn(file_path):
    with io.open(file_path, 'r', encoding='utf8') as fin:
        ngram_vectorizer = CountVectorizer(analyzer='word')
        X = ngram_vectorizer.fit_transform(fin)
        vocab = ngram_vectorizer.get_feature_names()
        counts = X.sum(axis=0).A1
    return Counter(dict(zip(vocab, counts)))

def extract_dictionary_native(file_path):
    dictionary = Counter()
    with io.open(file_path, 'r', encoding='utf8') as fin:
        for line in fin:
            dictionary.update(line.split())
    return dictionary

def extract_dictionary_paddle(file_path):
    dictionary = defaultdict(int)
    with io.open(file_path, 'r', encoding='utf8') as fin:
        for line in fin:
            for word in line.split():
                dictionary[word] += 1
    return dictionary

start = time.time()
extract_dictionary_sklearn(infile)
print time.time() - start

start = time.time()
extract_dictionary_native(infile)
print time.time() - start

start = time.time()
extract_dictionary_paddle(infile)
print time.time() - start
[out]:
38.306814909
24.8241138458
12.1182529926
Size of the data used in the benchmarks above (154MB):
$ wc -c /path/to/file
161680851
$ wc -l /path/to/file
2176141
Some things to note:
- With the sklearn version, there is the overhead of vectorizer creation plus the numpy manipulation and conversion into a Counter object
- Then there is the native Counter update version; it seems Counter.update() is an expensive operation
You can try with sklearn
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer

vectorizer = CountVectorizer()
data = ['i am student', 'the student suffers a lot']
transformed_data = vectorizer.fit_transform(data)
vocab = {a: b for a, b in zip(vectorizer.get_feature_names(), np.ravel(transformed_data.sum(axis=0)))}
print(vocab)
Combining everyone else's views and some of my own :)
Here is what I have for you
from collections import Counter
from nltk import word_tokenize
from nltk.tokenize import RegexpTokenizer
from nltk.corpus import stopwords
text='''Note that if you use RegexpTokenizer option, you lose
natural language features special to word_tokenize
like splitting apart contractions. You can naively
split on the regex \w+ without any need for the NLTK.
'''
# tokenize
raw = ' '.join(word_tokenize(text.lower()))
tokenizer = RegexpTokenizer(r'[A-Za-z]{2,}')
words = tokenizer.tokenize(raw)
# remove stopwords
stop_words = set(stopwords.words('english'))
words = [word for word in words if word not in stop_words]
# count word frequency, sort and return just 20
counter = Counter()
counter.update(words)
most_common = counter.most_common(20)
most_common
Output
(all of it)
[('note', 1),
('use', 1),
('regexptokenizer', 1),
('option', 1),
('lose', 1),
('natural', 1),
('language', 1),
('features', 1),
('special', 1),
('word', 1),
('tokenize', 1),
('like', 1),
('splitting', 1),
('apart', 1),
('contractions', 1),
('naively', 1),
('split', 1),
('regex', 1),
('without', 1),
('need', 1)]
This could be done better in terms of efficiency, but if you are not too worried about that, this code is the best.