R中的并行for循环
Parallel for loop in R
我有一个 data.frame sent,其中 sent$words 存放句子;另有一个包含正面/负面单词的字典数据框 wordsDF(单词在 wordsDF[x,1])。正面词 = 1,负面词 = -1 (wordsDF[x,2])。该 wordsDF 数据框中的单词根据其长度(字符串长度)按降序排序。我将其用于以下函数。
此功能的工作原理:
1) 通过每个句子统计存储在wordsDF中的单词的出现次数
2) 计算情感分数:特定句子中特定单词 (wordsDF) 的出现次数 * 该单词的情感值(正 = 1,负 = -1)
3) 从句子中删除匹配的词以进行另一次迭代。
使用stringr包的原始解决方案:
# Score one or more sentences against a sentiment dictionary.
#
# For each dictionary entry (longest first, as the dictionary is pre-sorted):
#   1) count its occurrences in each sentence,
#   2) add count * sentiment value (+1 / -1) to the running score,
#   3) blank out the matched text so shorter entries cannot re-match it.
#
# @param sentence character vector of (space-padded) sentences.
# @param words_df dictionary data.frame: column 1 = pattern, column 2 = value.
#                 Defaults to the global `wordsDF` for backward compatibility.
# @return numeric vector of sentiment scores, one per sentence.
scoreSentence_01 <- function(sentence, words_df = wordsDF) {
  score <- 0
  # seq_len() is safe for an empty dictionary (1:nrow(df) would give c(1, 0)).
  for (x in seq_len(nrow(words_df))) {
    count <- str_count(sentence, words_df[x, 1])
    score <- score + count * words_df[x, 2]  # compute score (count * sentValue)
    # Remove matched words so the next (shorter) pattern cannot re-match them.
    sentence <- str_replace_all(sentence, words_df[x, 1], " ")
  }
  score
}
更快的解决方案 - 第 4 行和第 5 行替换原始解决方案中的第 4 行。
# Faster variant of scoreSentence_01 using stringi for counting.
#
# stri_count() is already vectorized over its first argument, so the
# original sapply() wrapper was pure overhead -- and its helper was named
# `sd`, shadowing stats::sd. Both issues are fixed here.
#
# @param sentence character vector of (space-padded) sentences.
# @param words_df dictionary data.frame: column 1 = pattern, column 2 = value.
#                 Defaults to the global `wordsDF` for backward compatibility.
# @return numeric vector of sentiment scores, one per sentence.
scoreSentence_02 <- function(sentence, words_df = wordsDF) {
  score <- 0
  for (x in seq_len(nrow(words_df))) {
    # Vectorized count of pattern occurrences across all sentences at once.
    results <- stri_count(sentence, regex = words_df[x, 1])
    score <- score + results * words_df[x, 2]  # compute score (count * sentValue)
    # Remove matched words so shorter patterns cannot re-match them.
    sentence <- str_replace_all(sentence, words_df[x, 1], " ")
  }
  score
}
调用函数是:
# Score every sentence (slow: loops over all dictionary rows per call).
scoreSentence_Score <- scoreSentence_01(sent$words)
实际上,我使用的数据集包含 300,000 个句子,字典中正面和负面单词共计 7,000 个。这种方法非常非常慢;由于我只有 R 编程的初学者水平,我已经无计可施了。
谁能帮我,请问如何将此函数重写为矢量化或并行解决方案。非常感谢任何帮助或建议。非常感谢您。
虚拟数据:
# Dummy data: seven example sentences with user ids.
sent <- data.frame(
  words = c("great just great right size and i love this notebook",
            "benefits great laptop at the top",
            "wouldnt bad notebook and very good",
            "very good quality",
            "bad orgtop but great",
            "great improvement for that great improvement bad product but overall is not good",
            "notebook is not good but i love batterytop"),
  user = c(1, 2, 3, 4, 5, 6, 7),
  stringsAsFactors = FALSE
)
# Dictionaries of positive / negative words and phrases.
posWords <- c("great", "improvement", "love", "great improvement", "very good",
              "good", "right", "very", "benefits", "extra", "benefit", "top",
              "extraordinarily", "extraordinary", "super", "benefits super",
              "good", "benefits great", "wouldnt bad")
negWords <- c("hate", "bad", "not good", "horrible")
# Replicate original data.frame - big data simulation
# (7 sentences x 10000 = 70,000 rows; the original comment said 700.000).
df.expanded <- as.data.frame(replicate(10000, sent$words))
# Plain row indexing replaces zoo::coredata(): `sent` is an ordinary
# data.frame, so the zoo dependency was unnecessary.
sent <- sent[rep(seq_len(nrow(sent)), 10000), ]
# Pad each sentence with a leading/trailing space so the space-padded
# dictionary entries below match on word boundaries.
sent$words <- paste0(" ", sent$words, " ")
rownames(sent) <- NULL
# Build the dictionary: positive words score +1, negative words -1.
wordsDF <- data.frame(words = posWords, value = 1, stringsAsFactors = FALSE)
wordsDF <- rbind(wordsDF, data.frame(words = negWords, value = -1))
# nchar() is vectorized; no need for unlist(lapply(...)).
wordsDF$lengths <- nchar(wordsDF$words)
# Sort longest-first so multi-word phrases are matched (and removed)
# before their component words.
wordsDF <- wordsDF[order(-wordsDF$lengths), ]
wordsDF$words <- paste0(" ", wordsDF$words, " ")
rownames(wordsDF) <- NULL
期望的输出是:
words user scoreSentence_Score
great just great right size and i love this notebook 1 4
benefits great laptop at the top 2 2
wouldnt bad notebook and very good 3 2
very good quality 4 1
bad orgtop but great 5 0
great improvement for that great improvement bad product but overall is not good 6 0
notebook is not good but i love batterytop 7 0
你就不能这样做吗:
# NOTE(review): this naive attempt is flawed -- wordsDF[,2] is the numeric
# sentiment-value column, not a vector of negative words, and str_count()
# recycles a multi-element pattern across the sentence vector instead of
# summing the matches of every dictionary entry per sentence.
library("stringr")
scoreSentence_Score <- str_count(sent$words, wordsDF[,1]) - str_count(sent$words, wordsDF[,2])
好的,既然我知道您必须处理短语和单词...这是另一个尝试。基本上,您必须先拆分您的短语,对它们进行评分,将它们从字符串中删除,然后对您的单词进行评分...
library(stringr)
# Dummy data: seven example sentences with user ids.
sent <- data.frame(
  words = c("great just great right size and i love this notebook",
            "benefits great laptop at the top",
            "wouldnt bad notebook and very good",
            "very good quality",
            "bad orgtop but great",
            "great improvement for that great improvement bad product but overall is not good",
            "notebook is not good but i love batterytop"),
  user = c(1, 2, 3, 4, 5, 6, 7),
  stringsAsFactors = FALSE
)
posWords <- c("great", "improvement", "love", "great improvement", "very good",
              "good", "right", "very", "benefits", "extra", "benefit", "top",
              "extraordinarily", "extraordinary", "super", "benefits super",
              "good", "benefits great", "wouldnt bad")
negWords <- c("hate", "bad", "not good", "horrible")
sent$words2 <- sent$words
# Split dictionaries into multi-word phrases and single words.
bad_phrases <- negWords[grepl(" ", negWords)]
bad_words <- negWords[!negWords %in% bad_phrases]
# BUG FIX: "\b" in an R string literal is the BACKSPACE character, so the
# original patterns could never match. The regex word boundary needs an
# escaped backslash: "\\b".
bad_words <- paste0("\\b", bad_words, "\\b")
pos_phrases <- posWords[grepl(" ", posWords)]
pos_words <- posWords[!posWords %in% pos_phrases]
pos_words <- paste0("\\b", pos_words, "\\b")
# Score phrases first, then remove them so their component words are not
# double-counted by the single-word pass below.
score <- -str_count(sent$words2, paste(bad_phrases, collapse = "|"))
sent$words2 <- gsub(paste(bad_phrases, collapse = "|"), "", sent$words2)
score <- score + str_count(sent$words2, paste(pos_phrases, collapse = "|"))
sent$words2 <- gsub(paste(pos_phrases, collapse = "|"), "", sent$words2)
# Finally score the remaining single words with word-boundary patterns.
score <- score + str_count(sent$words2, paste(pos_words, collapse = "|")) -
  str_count(sent$words2, paste(bad_words, collapse = "|"))
score
我有一个 data.frame sent,其中 sent$words 存放句子;另有一个包含正面/负面单词的字典数据框 wordsDF(单词在 wordsDF[x,1])。正面词 = 1,负面词 = -1 (wordsDF[x,2])。该 wordsDF 数据框中的单词根据其长度(字符串长度)按降序排序。我将其用于以下函数。
此功能的工作原理:
1) 通过每个句子统计存储在wordsDF中的单词的出现次数 2) 计算情感分数:特定句子中特定单词 (wordsDF) 的出现次数 * 该单词的情感值(正 = 1,负 = -1) 3) 从句子中删除匹配的词以进行另一次迭代。
使用stringr包的原始解决方案:
# Score one or more sentences against a sentiment dictionary.
#
# For each dictionary entry (longest first, as the dictionary is pre-sorted):
#   1) count its occurrences in each sentence,
#   2) add count * sentiment value (+1 / -1) to the running score,
#   3) blank out the matched text so shorter entries cannot re-match it.
#
# @param sentence character vector of (space-padded) sentences.
# @param words_df dictionary data.frame: column 1 = pattern, column 2 = value.
#                 Defaults to the global `wordsDF` for backward compatibility.
# @return numeric vector of sentiment scores, one per sentence.
scoreSentence_01 <- function(sentence, words_df = wordsDF) {
  score <- 0
  # seq_len() is safe for an empty dictionary (1:nrow(df) would give c(1, 0)).
  for (x in seq_len(nrow(words_df))) {
    count <- str_count(sentence, words_df[x, 1])
    score <- score + count * words_df[x, 2]  # compute score (count * sentValue)
    # Remove matched words so the next (shorter) pattern cannot re-match them.
    sentence <- str_replace_all(sentence, words_df[x, 1], " ")
  }
  score
}
更快的解决方案 - 第 4 行和第 5 行替换原始解决方案中的第 4 行。
# Faster variant of scoreSentence_01 using stringi for counting.
#
# stri_count() is already vectorized over its first argument, so the
# original sapply() wrapper was pure overhead -- and its helper was named
# `sd`, shadowing stats::sd. Both issues are fixed here.
#
# @param sentence character vector of (space-padded) sentences.
# @param words_df dictionary data.frame: column 1 = pattern, column 2 = value.
#                 Defaults to the global `wordsDF` for backward compatibility.
# @return numeric vector of sentiment scores, one per sentence.
scoreSentence_02 <- function(sentence, words_df = wordsDF) {
  score <- 0
  for (x in seq_len(nrow(words_df))) {
    # Vectorized count of pattern occurrences across all sentences at once.
    results <- stri_count(sentence, regex = words_df[x, 1])
    score <- score + results * words_df[x, 2]  # compute score (count * sentValue)
    # Remove matched words so shorter patterns cannot re-match them.
    sentence <- str_replace_all(sentence, words_df[x, 1], " ")
  }
  score
}
调用函数是:
# Score every sentence (slow: loops over all dictionary rows per call).
scoreSentence_Score <- scoreSentence_01(sent$words)
实际上,我使用的数据集包含 300,000 个句子,字典中正面和负面单词共计 7,000 个。这种方法非常非常慢;由于我只有 R 编程的初学者水平,我已经无计可施了。
谁能帮我,请问如何将此函数重写为矢量化或并行解决方案。非常感谢任何帮助或建议。非常感谢您。
虚拟数据:
# Dummy data: seven example sentences with user ids.
sent <- data.frame(
  words = c("great just great right size and i love this notebook",
            "benefits great laptop at the top",
            "wouldnt bad notebook and very good",
            "very good quality",
            "bad orgtop but great",
            "great improvement for that great improvement bad product but overall is not good",
            "notebook is not good but i love batterytop"),
  user = c(1, 2, 3, 4, 5, 6, 7),
  stringsAsFactors = FALSE
)
# Dictionaries of positive / negative words and phrases.
posWords <- c("great", "improvement", "love", "great improvement", "very good",
              "good", "right", "very", "benefits", "extra", "benefit", "top",
              "extraordinarily", "extraordinary", "super", "benefits super",
              "good", "benefits great", "wouldnt bad")
negWords <- c("hate", "bad", "not good", "horrible")
# Replicate original data.frame - big data simulation
# (7 sentences x 10000 = 70,000 rows; the original comment said 700.000).
df.expanded <- as.data.frame(replicate(10000, sent$words))
# Plain row indexing replaces zoo::coredata(): `sent` is an ordinary
# data.frame, so the zoo dependency was unnecessary.
sent <- sent[rep(seq_len(nrow(sent)), 10000), ]
# Pad each sentence with a leading/trailing space so the space-padded
# dictionary entries below match on word boundaries.
sent$words <- paste0(" ", sent$words, " ")
rownames(sent) <- NULL
# Build the dictionary: positive words score +1, negative words -1.
wordsDF <- data.frame(words = posWords, value = 1, stringsAsFactors = FALSE)
wordsDF <- rbind(wordsDF, data.frame(words = negWords, value = -1))
# nchar() is vectorized; no need for unlist(lapply(...)).
wordsDF$lengths <- nchar(wordsDF$words)
# Sort longest-first so multi-word phrases are matched (and removed)
# before their component words.
wordsDF <- wordsDF[order(-wordsDF$lengths), ]
wordsDF$words <- paste0(" ", wordsDF$words, " ")
rownames(wordsDF) <- NULL
期望的输出是:
words user scoreSentence_Score
great just great right size and i love this notebook 1 4
benefits great laptop at the top 2 2
wouldnt bad notebook and very good 3 2
very good quality 4 1
bad orgtop but great 5 0
great improvement for that great improvement bad product but overall is not good 6 0
notebook is not good but i love batterytop 7 0
你就不能这样做吗:
# NOTE(review): this naive attempt is flawed -- wordsDF[,2] is the numeric
# sentiment-value column, not a vector of negative words, and str_count()
# recycles a multi-element pattern across the sentence vector instead of
# summing the matches of every dictionary entry per sentence.
library("stringr")
scoreSentence_Score <- str_count(sent$words, wordsDF[,1]) - str_count(sent$words, wordsDF[,2])
好的,既然我知道您必须处理短语和单词...这是另一个尝试。基本上,您必须先拆分您的短语,对它们进行评分,将它们从字符串中删除,然后对您的单词进行评分...
library(stringr)
# Dummy data: seven example sentences with user ids.
sent <- data.frame(
  words = c("great just great right size and i love this notebook",
            "benefits great laptop at the top",
            "wouldnt bad notebook and very good",
            "very good quality",
            "bad orgtop but great",
            "great improvement for that great improvement bad product but overall is not good",
            "notebook is not good but i love batterytop"),
  user = c(1, 2, 3, 4, 5, 6, 7),
  stringsAsFactors = FALSE
)
posWords <- c("great", "improvement", "love", "great improvement", "very good",
              "good", "right", "very", "benefits", "extra", "benefit", "top",
              "extraordinarily", "extraordinary", "super", "benefits super",
              "good", "benefits great", "wouldnt bad")
negWords <- c("hate", "bad", "not good", "horrible")
sent$words2 <- sent$words
# Split dictionaries into multi-word phrases and single words.
bad_phrases <- negWords[grepl(" ", negWords)]
bad_words <- negWords[!negWords %in% bad_phrases]
# BUG FIX: "\b" in an R string literal is the BACKSPACE character, so the
# original patterns could never match. The regex word boundary needs an
# escaped backslash: "\\b".
bad_words <- paste0("\\b", bad_words, "\\b")
pos_phrases <- posWords[grepl(" ", posWords)]
pos_words <- posWords[!posWords %in% pos_phrases]
pos_words <- paste0("\\b", pos_words, "\\b")
# Score phrases first, then remove them so their component words are not
# double-counted by the single-word pass below.
score <- -str_count(sent$words2, paste(bad_phrases, collapse = "|"))
sent$words2 <- gsub(paste(bad_phrases, collapse = "|"), "", sent$words2)
score <- score + str_count(sent$words2, paste(pos_phrases, collapse = "|"))
sent$words2 <- gsub(paste(pos_phrases, collapse = "|"), "", sent$words2)
# Finally score the remaining single words with word-boundary patterns.
score <- score + str_count(sent$words2, paste(pos_words, collapse = "|")) -
  str_count(sent$words2, paste(bad_words, collapse = "|"))
score