Why is LSA in text2vec producing different results every time?
I was using latent semantic analysis in the text2vec package to generate word vectors and using transform to fit new data, when I noticed something odd: the spaces are not lined up when trained on the same data.
There appears to be some inconsistency (or randomness?) in the method. Namely, even when re-running an LSA model on the exact same data, the resulting word vectors are wildly different despite identical input. When looking around I only found these old, closed GitHub issues (link, link) and a mention in the changelog about LSA being cleaned up. I reproduced the behaviour with the movie_review dataset and the (slightly modified) code from the documentation:
library(text2vec)
packageVersion("text2vec") # ‘0.5.1’
data("movie_review")
N = 1000
tokens = word_tokenizer(tolower(movie_review$review[1:N]))
it = itoken(tokens)
voc = create_vocabulary(it) %>% prune_vocabulary(term_count_min = 5, doc_proportion_max = 0.9)
vectorizer = vocab_vectorizer(voc)
tcm = create_tcm(it, vectorizer)
# edit: make tcm symmetric:
tcm = tcm + Matrix::t(Matrix::triu(tcm))
n_topics = 10
lsa_1 = LatentSemanticAnalysis$new(n_topics)
d1 = lsa_1$fit_transform(tcm)
lsa_2 = LatentSemanticAnalysis$new(n_topics)
d2 = lsa_2$fit_transform(tcm)
# despite being trained on the same data, words have completely different vectors:
sim2(d1["film",,drop=F], d2["film",,drop=F])
# yields values like -0.993363 but sometimes 0.9888435 (should be 1)
mean(diag(sim2(d1, d2)))
# e.g. -0.2316826
hist(diag(sim2(d1, d2)), main="self-similarity between models")
# note: these numbers are different every time!
# But: within each model, results seem consistent and reasonable:
# top similar words for "film":
head(sort(sim2(d1, d1["film",,drop=F])[,1],decreasing = T))
# film movie show piece territory bay
# 1.0000000 0.9873934 0.9803280 0.9732380 0.9680488 0.9668800
# same in the second model:
head(sort(sim2(d2, d2["film",,drop=F])[,1],decreasing = T))
# film movie show piece territory bay
# 1.0000000 0.9873935 0.9803279 0.9732364 0.9680495 0.9668819
# transform works:
sim2(d2["film",,drop=F], transform(tcm["film",,drop=F], lsa_2 )) # yields 1
# LSA in quanteda doesn't have this problem, same data => same vectors
library(quanteda)
d1q = textmodel_lsa(as.dfm(tcm), 10)
d2q = textmodel_lsa(as.dfm(tcm), 10)
mean(diag(sim2(d1q$docs, d2q$docs))) # yields 1
# the top synonyms for "film" are also a bit different with quanteda's LSA
# film movie hunk show territory bay
# 1.0000000 0.9770574 0.9675766 0.9642915 0.9577723 0.9573138
What is going on here? Is this a bug, is it expected behaviour for some reason, or do I have a big misunderstanding? (I'm sort of hoping it's the latter...). If it is intentional, why does quanteda behave differently?
The problem is that your matrix seems to be ill-conditioned, so you run into numerical stability issues.
library(text2vec)
library(magrittr)
data("movie_review")
N = 1000
tokens = word_tokenizer(tolower(movie_review$review[1:N]))
it = itoken(tokens)
voc = create_vocabulary(it) %>% prune_vocabulary(term_count_min = 5, doc_proportion_max = 0.9)
vectorizer = vocab_vectorizer(voc)
tcm = create_tcm(it, vectorizer)
# condition number
kappa(tcm)
# Inf
Now, if you perform truncated SVD (the algorithm behind LSA), you will notice that the singular vectors are very close to zero:
library(irlba)
truncated_svd = irlba(tcm, 10)
str(truncated_svd)
# $ d : num [1:10] 2139 1444 660 559 425 ...
# $ u : num [1:4387, 1:10] -1.44e-04 -1.62e-04 -7.77e-05 -8.44e-04 -8.99e-04 ...
# $ v : num [1:4387, 1:10] 6.98e-20 2.37e-20 4.09e-20 -4.73e-20 6.62e-20 ...
# $ iter : num 3
# $ mprod: num 50
Hence the sign of the embeddings is not stable, and neither is the cosine angle between them.
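One way to see that this is largely a sign-indeterminacy issue is to align the two embeddings before comparing them. The rough sketch below (not part of the original answer) reuses the d1 and d2 matrices from your example; flip and d2_aligned are just illustrative names. It flips any latent dimension of d2 whose column points in the opposite direction of the corresponding column of d1. Note this only helps as far as the instability is a pure per-column sign flip; with near-degenerate singular values the subspace itself can rotate, in which case a full orthogonal (Procrustes) alignment would be needed.
# sketch: align the sign of each latent dimension of d2 to d1 before comparing
flip = sign(colSums(d1 * d2))        # +1/-1 per singular vector
d2_aligned = sweep(d2, 2, flip, "*") # flip the mismatched columns of d2
mean(diag(sim2(d1, d2_aligned)))     # should move much closer to 1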
Similar to how it works in sklearn in Python, the truncated SVD functions in R have random number generation built in. This is both why they are so powerful for building large models and a bit tricky for smaller uses. If you set a seed with set.seed() before you create the SVD matrices, you should have no problem. This used to trip me up when doing LSA too.
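For example, here is a minimal sketch reusing the tcm and n_topics objects from the question (lsa_a, lsa_b, da, db are just illustrative names and the seed value is arbitrary). Fixing the RNG state before each fit makes the random initialisation inside the truncated SVD reproducible, so repeated runs return identical embeddings:
set.seed(42)                               # same seed before each fit
lsa_a = LatentSemanticAnalysis$new(n_topics)
da = lsa_a$fit_transform(tcm)
set.seed(42)
lsa_b = LatentSemanticAnalysis$new(n_topics)
db = lsa_b$fit_transform(tcm)
mean(diag(sim2(da, db)))                   # should now be 1 across runs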
Let me know if this helps!