尝试 运行 cronjob 来构建 Twitter 用户 ID 列表

Trying to run cronjob to build a list of Twitter User IDs

我正在尝试构建 Twitter 用户 ID 列表(还收集生物描述、位置和一些其他参数)。

我想设置一个 cronjob,让这段代码每天运行一次,从 Twitter API 收集信息,并持续一个月。

然而,我担心的是:无论何时运行,我目前的代码都会收集一组全新的 Twitter 用户,而不会把这组新数据追加到我的数据框中(数据框里只会留下最近一次收集到的 Twitter 用户列表)。

这里是相关代码->

# Authenticate with the Twitter API (all credentials redacted).
# NOTE(review): create_token() is deprecated in recent rtweet releases —
# confirm the installed rtweet version still supports it.
token <- create_token(
  app             = "**",
  consumer_key    = "*",
  consumer_secret = "*",
  access_token    = "*",
  access_secret   = "*"
)


# Run one geocoded search per keyword (queries, locations and API keys
# redacted).  Each call fetches up to `n` recent tweets matching the
# keyword inside the looked-up coordinate area.
keyword1 <- search_tweets(
  "**", geocode = lookup_coords("**", apikey = "*"), n = 2700
)
keyword2 <- search_tweets(
  "**", geocode = lookup_coords("**", apikey = "**"), n = 2700
)
keyword3 <- search_tweets(
  "**", geocode = lookup_coords("**", apikey = "**"), n = 2700
)
keyword4 <- search_tweets(
  "**", geocode = lookup_coords("**", apikey = "**"), n = 2700
)
keyword5 <- search_tweets(
  "**", geocode = lookup_coords("**", apikey = "**"), n = 2700
)
keyword6 <- search_tweets(
  "**", geocode = lookup_coords("**", apikey = "**"), n = 2700
)
keyword7 <- search_tweets(
  "**", geocode = lookup_coords("**", apikey = "**"), n = 2700
)
keyword8 <- search_tweets(
  "**", geocode = lookup_coords("**", apikey = "**"), n = 2700
)
keyword9 <- search_tweets(
  "**", geocode = lookup_coords("**", apikey = "**"), n = 2700
)
keyword10 <- search_tweets(
  "**", geocode = lookup_coords("**", apikey = "**"), n = 2700
)
# The last keyword gets a larger cap than the others.
keyword11 <- search_tweets(
  "**", geocode = lookup_coords("**", apikey = "**"), n = 5000
)

# Pull the columns of interest out of every keyword result set and
# concatenate them across all eleven searches.
keyword_results <- list(
  keyword1, keyword2, keyword3, keyword4, keyword5, keyword6,
  keyword7, keyword8, keyword9, keyword10, keyword11
)

# Concatenate one named column across every result set.  do.call(c, ...)
# behaves exactly like c(kw1$col, ..., kw11$col), including for
# list-columns such as `hashtags`.
collect_column <- function(col) {
  do.call(c, lapply(keyword_results, function(res) res[[col]]))
}

users          <- collect_column("user_id")
screen         <- collect_column("screen_name")
followers      <- collect_column("followers_count")
place          <- collect_column("location")
tweet_hashtags <- collect_column("hashtags")
descript       <- collect_column("description")

# Assemble the combined columns into data frames (with and without the
# bio description) and drop exact duplicate rows.
frame <- data.frame(users = users, screen = screen,
                    followers = followers, place = place)
frame2 <- data.frame(users = users, screen = screen,
                     followers = followers, place = place,
                     descript = descript)

unique_frame  <- unique(frame)
unique_frame2 <- unique(frame2)

# Descriptions from the de-duplicated frame, kept for the text clean-up.
frame2_descr <- unique_frame2$descript

# Clean up the user bio descriptions for text analysis: strip
# punctuation, digits and non-ASCII symbols, then remove a stop-word
# list of high-frequency but uninformative tokens.
#
# Bug fix: the digit pattern was written "[[digit:]]", which R parses as
# an ordinary character class matching the literal characters
# [ d i g t : — silently deleting those letters from every description.
# The valid POSIX class is "[[:digit:]]".
remove1 <- gsub("[[:punct:]]", " ", unique_frame2$descript)  # strip punctuation
remove2 <- gsub("[[:digit:]]", " ", remove1)                 # strip digits
# Transliterate to ASCII, substituting a space for unmappable symbols.
cleaned <- iconv(remove2, from = "latin1", to = "ASCII", sub = " ")

# Words that are not helpful to the analysis but appear a lot.
words <- c("the", "com", "https", "gmail", "bio", "just","don", "live", "can", "real", "things", "best", "you", "follow", "everything", "believe", "get", "trying", "day","for", "mor", "first", "born","hate", "good","great","high", "rself","back","time", "always", "tweet", "say", "anything", "tweets", "think", "never", "know", "see", "guy","will", "making", "now", "twitter","free", "make", "doesn","one", "chelseafc", "got", "views", "hard", "south", "world", "self","around","fan","addict", "not", "fan", "thing", "when","mor","far","want","give","hop","host","boy","life", "god", "official","alumni","email", "new","king","like","living","change", "ing", "going", "jesus")

# NOTE(review): this alternation is unanchored, so it also removes the
# listed strings when they occur inside longer words (e.g. "can" inside
# "American").  Several entries ("mor", "rself", "ing") look
# intentionally substring-like, so the behaviour is preserved; wrap the
# pattern as paste0("\\b(", ..., ")\\b") if whole-word matching is wanted.
cleaned <- gsub(paste(words, collapse = "|"), "", cleaned, ignore.case = TRUE)

# Attach the cleaned descriptions as an extra column and keep the screen
# names handy for later use.
unique_frame_df <- cbind(unique_frame2, cleaned)
screenName <- unique_frame_df$screen

# Write the data frame to a local CSV.
# Bug fix: the original file name contained a stray markdown fragment
# ("twitter_list*emphasized text*.csv"); use a plain file name.
# NOTE(review): write.csv() overwrites the file on every run — for a
# daily cronjob that should accumulate data, append with
# write.table(..., append = TRUE, col.names = FALSE, sep = ",") instead.
write.csv(unique_frame_df, file = "twitter_list.csv")

只是重申:期望的结果是能够运行一个 cron 作业,并把新数据追加到我现有的数据框中。

您的代码每次运行都会用新的 csv 覆盖现有的 csv。您可以改用 write.table,并通过 append=TRUE 和 col.names=FALSE 参数把新数据追加到现有文件中:

# Append the new rows to the existing CSV instead of overwriting it.
# Bug fix: write.table() has no `header` argument (the original line
# errors with "unused argument"); suppressing the repeated header row
# is done with `col.names = FALSE`.  A stray backtick after
# `append=TRUE` is also removed.
write.table(unique_frame_df, file = "twitter_list.csv",
            col.names = FALSE, append = TRUE, sep = ",")

请注意陷阱:write.csv 被设计为不灵活以确保创建有效的 csv 文件作为结果。您需要改用 write.table。

您可能还想添加日期戳列以指示收集特定观测值的时间。