显示词形还原的进展
Show progress in lemmatization
以下脚本用于对带有文本的给定输入列进行词形还原:
%%time
import pandas as pd
from gensim.utils import lemmatize
from gensim.parsing.preprocessing import STOPWORDS
# Rebind gensim's STOPWORDS (presumably a frozenset) to a list; passed to
# lemmatize() below as its stopwords argument.
STOPWORDS = list(STOPWORDS)
# Load the sample dataset from a pastebin URL; the code below reads a
# 'text' column from it.
data = pd.read_csv('https://pastebin.com/raw/0SEv1RMf')
def lemmatization(s):
    """Lemmatize *s* and return the lemmas as a list of strings.

    gensim's ``lemmatize`` lowercases and tokenizes the text, drops
    stopwords and tokens shorter than ``min_length``, and yields byte
    strings of the form ``b"word/POS"``; only the word part is kept.
    """
    return [
        tok.decode('utf-8').split('/')[0]
        for tok in lemmatize(s, stopwords=STOPWORDS, min_length=3)
    ]
# Run the lemmatizer row-by-row over the 'text' column.
X_train = data.apply(lambda r: lemmatization(r['text']), axis=1)
print(X_train)
问题:
如何打印词形还原的进度?
您可以将一个变量传递给词形还原函数以跟踪它被调用的次数 - 然后每 1000 次迭代左右打印一次。我将它包装在下面的列表中,这样 int 就可以通过引用而不是值来传递。
%%time
import pandas as pd
from gensim.utils import lemmatize
from gensim.parsing.preprocessing import STOPWORDS
# Rebind gensim's STOPWORDS (presumably a frozenset) to a list; passed to
# lemmatize() below as its stopwords argument.
STOPWORDS = list(STOPWORDS)
# Load the sample dataset from a pastebin URL; the code below reads a
# 'text' column from it.
data = pd.read_csv('https://pastebin.com/raw/0SEv1RMf')
# Single-element list used as a mutable call counter, so the count
# persists across calls (pass-by-reference workaround for an int).
iteration_count = [0]
def lemmatization(s, iteration_count, progress_every=1000):
    """Lemmatize *s* and periodically print a progress count.

    Parameters
    ----------
    s : str
        Text to lemmatize.
    iteration_count : list[int]
        Single-element list used as a mutable call counter so the running
        total survives across calls (pass-by-reference workaround for an
        int); incremented once per call.
    progress_every : int, optional
        Print the running call count every this many calls
        (default 1000, matching the original hard-coded interval).

    Returns
    -------
    list[str]
        Lemmatized tokens with the POS tag stripped.
    """
    # gensim's lemmatize lowercases and tokenizes, drops stopwords and
    # tokens shorter than min_length, then yields bytes like b"word/POS";
    # keep only the word part.
    result = [
        token.decode('utf-8').split('/')[0]
        for token in lemmatize(s, stopwords=STOPWORDS, min_length=3)
    ]
    iteration_count[0] += 1
    if iteration_count[0] % progress_every == 0:
        print(iteration_count[0])
    return result
# Run the lemmatizer row-by-row, threading the shared counter through so
# progress is printed every 1000 calls.
X_train = data.apply(lambda r: lemmatization(r['text'], iteration_count), axis=1)
print(X_train)
以下脚本用于对带有文本的给定输入列进行词形还原:
%%time
import pandas as pd
from gensim.utils import lemmatize
from gensim.parsing.preprocessing import STOPWORDS
# Rebind gensim's STOPWORDS (presumably a frozenset) to a list; passed to
# lemmatize() below as its stopwords argument.
STOPWORDS = list(STOPWORDS)
# Load the sample dataset from a pastebin URL; the code below reads a
# 'text' column from it.
data = pd.read_csv('https://pastebin.com/raw/0SEv1RMf')
def lemmatization(s):
    """Lemmatize *s* and return the lemmas as a list of strings.

    gensim's ``lemmatize`` lowercases and tokenizes the text, drops
    stopwords and tokens shorter than ``min_length``, and yields byte
    strings of the form ``b"word/POS"``; only the word part is kept.
    """
    return [
        tok.decode('utf-8').split('/')[0]
        for tok in lemmatize(s, stopwords=STOPWORDS, min_length=3)
    ]
# Run the lemmatizer row-by-row over the 'text' column.
X_train = data.apply(lambda r: lemmatization(r['text']), axis=1)
print(X_train)
问题:
如何打印词形还原的进度?
您可以将一个变量传递给词形还原函数以跟踪它被调用的次数 - 然后每 1000 次迭代左右打印一次。我将它包装在下面的列表中,这样 int 就可以通过引用而不是值来传递。
%%time
import pandas as pd
from gensim.utils import lemmatize
from gensim.parsing.preprocessing import STOPWORDS
# Rebind gensim's STOPWORDS (presumably a frozenset) to a list; passed to
# lemmatize() below as its stopwords argument.
STOPWORDS = list(STOPWORDS)
# Load the sample dataset from a pastebin URL; the code below reads a
# 'text' column from it.
data = pd.read_csv('https://pastebin.com/raw/0SEv1RMf')
# Single-element list used as a mutable call counter, so the count
# persists across calls (pass-by-reference workaround for an int).
iteration_count = [0]
def lemmatization(s, iteration_count, progress_every=1000):
    """Lemmatize *s* and periodically print a progress count.

    Parameters
    ----------
    s : str
        Text to lemmatize.
    iteration_count : list[int]
        Single-element list used as a mutable call counter so the running
        total survives across calls (pass-by-reference workaround for an
        int); incremented once per call.
    progress_every : int, optional
        Print the running call count every this many calls
        (default 1000, matching the original hard-coded interval).

    Returns
    -------
    list[str]
        Lemmatized tokens with the POS tag stripped.
    """
    # gensim's lemmatize lowercases and tokenizes, drops stopwords and
    # tokens shorter than min_length, then yields bytes like b"word/POS";
    # keep only the word part.
    result = [
        token.decode('utf-8').split('/')[0]
        for token in lemmatize(s, stopwords=STOPWORDS, min_length=3)
    ]
    iteration_count[0] += 1
    if iteration_count[0] % progress_every == 0:
        print(iteration_count[0])
    return result
# Run the lemmatizer row-by-row, threading the shared counter through so
# progress is printed every 1000 calls.
X_train = data.apply(lambda r: lemmatization(r['text'], iteration_count), axis=1)
print(X_train)