Error when creating a word cloud in R (Error in simple_triplet_matrix: 'i, j, v' different lengths)
I have the following R code to fetch recent tweets about a local mayoral candidate and build a word cloud from them:
library(twitteR)
library(ROAuth)
require(RCurl)
library(stringr)
library(tm)
library(ggmap)
library(plyr)
library(dplyr)
library(SnowballC)
library(wordcloud)
(...)
setup_twitter_oauth(...)
N = 10000 # Number of tweets
S = 200 # 200 km radius from Natal (covers the whole Natal area)
candidate = 'Carlos+Eduardo'
# Lists so I can add more cities in future versions of the code
lats = c(-5.7792569)
lons = c(-35.200916)
# Gets the tweets from every city
result = do.call(
  rbind,
  lapply(
    1:length(lats),
    function(i) searchTwitter(
      candidate,
      lang = "pt-br",
      n = N,
      resultType = "recent",
      geocode = paste(lats[i], lons[i], paste0(S, "km"), sep = ",")
    )
  )
)
# Get the latitude and longitude of each tweet,
# the tweet itself, how many times it was retweeted and favorited,
# the date and time it was tweeted, etc., and build a data frame.
result_lat = sapply(result, function(x) as.numeric(x$getLatitude()))
result_lat = sapply(result_lat, function(z) ifelse(length(z) != 0, z, NA))
result_lon = sapply(result, function(x) as.numeric(x$getLongitude()))
result_lon = sapply(result_lon, function(z) ifelse(length(z) != 0, z, NA))
result_date = lapply(result, function(x) x$getCreated())
result_date = sapply(result_date,
  function(x) strftime(x, format = "%d/%m/%Y %H:%M:%S", tz = "UTC")
)
result_text = sapply(result, function(x) x$getText())
result_text = unlist(result_text)
is_retweet = sapply(result, function(x) x$getIsRetweet())
retweeted = sapply(result, function(x) x$getRetweeted())
retweet_count = sapply(result, function(x) x$getRetweetCount())
favorite_count = sapply(result, function(x) x$getFavoriteCount())
favorited = sapply(result, function(x) x$getFavorited())
tweets = data.frame(
  cbind(
    tweet = result_text,
    date = result_date,
    lat = result_lat,
    lon = result_lon,
    is_retweet = is_retweet,
    retweeted = retweeted,
    retweet_count = retweet_count,
    favorite_count = favorite_count,
    favorited = favorited
  )
)
# Word Cloud
# Text stemming requires the package 'SnowballC'.
#https://cran.r-project.org/web/packages/SnowballC/index.html
#Create corpus
corpus = Corpus(VectorSource(tweets$tweet))
corpus = tm_map(corpus, removePunctuation)
corpus = tm_map(corpus, removeWords, stopwords('portuguese'))
corpus = tm_map(corpus, stemDocument)
wordcloud(corpus, max.words = 50, random.order = FALSE)
But I am getting these errors:
Error in simple_triplet_matrix(i = i, j = j, v = as.numeric(v), nrow =
length(allTerms), :
'i, j, v' different lengths
In addition: Warning messages:
1: In doRppAPICall("search/tweets", n, params = params,
retryOnRateLimit = retryOnRateLimit, :
10000 tweets were requested but the API can only return 518
#I understand this one, I cannot get more tweets than exist
2: In mclapply(unname(content(x)), termFreq, control) : all
scheduled cores encountered errors in user code
3: In simple_triplet_matrix(i = i, j = j, v = as.numeric(v), nrow =
length(allTerms), : NAs introduced by coercion
This is my first time building a word cloud, and I followed a tutorial like this one.
Is there a way to fix this? Also, the class of tweets$tweet is "factor". Do I need to convert it, and if so, how?
I think the problem is that wordcloud is not defined for tm corpus objects. Install the quanteda package and then try this:
plot(quanteda::corpus(corpus), max.words = 50, random.order = FALSE)
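(Depending on your quanteda version, that plot() method may not be available; in recent releases the word-cloud plotting function is textplot_wordcloud(), which now lives in the quanteda.textplots package. A rough sketch of the same idea, assuming both packages are installed:

library(quanteda)
library(quanteda.textplots)

# Build a quanteda corpus from the raw tweet text, tokenize, and count terms
corp = corpus(as.character(tweets$tweet))
toks = tokens(corp, remove_punct = TRUE, remove_url = TRUE)
dfmat = dfm(toks)
dfmat = dfm_remove(dfmat, stopwords("pt"))

# Plot the 50 most frequent terms
textplot_wordcloud(dfmat, max_words = 50))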
Following this tutorial, I defined a function to "clean" the text and built a TermDocumentMatrix instead of using stemDocument before creating the word cloud. It works fine now.
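Roughly, the approach looks like this (a minimal sketch reconstructed from the description above, not the exact tutorial code; clean_text is a placeholder helper):

library(tm)
library(wordcloud)

# Placeholder cleaning helper: strip URLs, punctuation, digits and extra spaces
clean_text = function(x) {
  x = iconv(x, to = "UTF-8", sub = "")           # drop invalid multibyte characters
  x = tolower(x)
  x = gsub("http\\S+", " ", x, perl = TRUE)      # remove URLs
  x = gsub("[[:punct:]]", " ", x)                # remove punctuation
  x = gsub("[[:digit:]]", " ", x)                # remove numbers
  gsub("\\s+", " ", x)                           # collapse whitespace
}

docs = Corpus(VectorSource(clean_text(as.character(tweets$tweet))))
docs = tm_map(docs, removeWords, stopwords("portuguese"))

# Term-document matrix instead of stemming, then word frequencies
tdm = TermDocumentMatrix(docs)
freqs = sort(rowSums(as.matrix(tdm)), decreasing = TRUE)

wordcloud(words = names(freqs), freq = freqs,
          max.words = 50, random.order = FALSE)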