Using content_transformer with udpipe_annotate

So I just discovered that udpipe has a great way of showing relations between words, and I started playing around with it. If I use it on the csv file right after importing, without making any changes, the code from this site runs perfectly.

But my problem arises when I create a corpus and change/remove some words. I'm no expert in R, and although I've googled a lot, I can't seem to figure it out.

Here is my code:

library(readr)
library(tm)

txt <- read_delim(fileName, ";", escape_double = FALSE, trim_ws = TRUE)

# Create corpus
docs <- Corpus(VectorSource(txt))
docs <- tm_map(docs, content_transformer(tolower))
docs <- tm_map(docs, removePunctuation)
docs <- tm_map(docs, removeNumbers)
docs <- tm_map(docs, stripWhitespace)
docs <- tm_map(docs, removeWords, stopwords('nl'))
docs <- tm_map(docs, removeWords, myWords())
docs <- tm_map(docs, content_transformer(gsub), pattern = "afspraak|afspraken|afgesproken", replacement = "afspraak")
docs <- tm_map(docs, content_transformer(gsub), pattern = "communcatie|communiceren|communicatie|comminicatie|communiceer|comuniseren|comunuseren|communictatie|comminiceren|comminisarisacie|communcaite", replacement = "communicatie")
docs <- tm_map(docs, content_transformer(gsub), pattern = "contact|kontact|kontakt", replacement = "contact")

comments <- docs

library(udpipe)
library(lattice)
stats <- txt_freq(x$upos)
stats$key <- factor(stats$key, levels = rev(stats$key))
#barchart(key ~ freq, data = stats, col = "cadetblue", main = "UPOS (Universal Parts of Speech)\n frequency of occurrence", xlab = "Freq")

## NOUNS (zelfstandige naamwoorden)
stats <- subset(x, upos %in% c("NOUN")) 
stats <- txt_freq(stats$token)
stats$key <- factor(stats$key, levels = rev(stats$key))
barchart(key ~ freq, data = head(stats, 20), col = "cadetblue", main = "Most occurring nouns", xlab = "Freq")

## ADJECTIVES (bijvoeglijke naamwoorden)
stats <- subset(x, upos %in% c("ADJ")) 
stats <- txt_freq(stats$token)
stats$key <- factor(stats$key, levels = rev(stats$key))
barchart(key ~ freq, data = head(stats, 20), col = "cadetblue", main = "Most occurring adjectives", xlab = "Freq")

## Using RAKE (harkjes)
stats <- keywords_rake(x = x, term = "lemma", group = "doc_id", relevant = x$upos %in% c("NOUN", "ADJ"))
stats$key <- factor(stats$keyword, levels = rev(stats$keyword))
barchart(key ~ rake, data = head(subset(stats, freq > 3), 20), col = "cadetblue", main = "Keywords identified by RAKE", xlab = "Rake")

## Using Pointwise Mutual Information Collocations
x$word <- tolower(x$token)
stats <- keywords_collocation(x = x, term = "word", group = "doc_id")
stats$key <- factor(stats$keyword, levels = rev(stats$keyword))
barchart(key ~ pmi, data = head(subset(stats, freq > 3), 20), col = "cadetblue", main = "Keywords identified by PMI Collocation", xlab = "PMI (Pointwise Mutual Information)")

## Using a sequence of POS tags (noun phrases / verb phrases)
x$phrase_tag <- as_phrasemachine(x$upos, type = "upos")
stats <- keywords_phrases(x = x$phrase_tag, term = tolower(x$token), pattern = "(A|N)*N(P+D*(A|N)*N)*", is_regex = TRUE, detailed = FALSE)
stats <- subset(stats, ngram > 1 & freq > 3)
stats$key <- factor(stats$keyword, levels = rev(stats$keyword))
barchart(key ~ freq, data = head(stats, 20), col = "cadetblue", main = "Keywords - simple noun phrases", xlab = "Frequency")


cooc <- cooccurrence(x = subset(x, upos %in% c("NOUN", "ADJ")),
                     term = "lemma",
                     group = c("doc_id", "paragraph_id", "sentence_id"))
head(cooc)
library(igraph)
library(ggraph)
library(ggplot2)
wordnetwork <- head(cooc, 30)
wordnetwork <- graph_from_data_frame(wordnetwork)
ggraph(wordnetwork, layout = "fr") +
    geom_edge_link(aes(width = cooc, edge_alpha = cooc), edge_colour = "pink") +
    geom_node_text(aes(label = name), col = "darkgreen", size = 4) +
    theme_graph(base_family = "Arial Narrow") +
    theme(legend.position = "none") +
    labs(title = "Cooccurrences within sentence", subtitle = "Nouns & Adjective")

It fails as soon as I convert the imported file into a corpus. Does anyone know how I can still run the tm_map functions and then run the udpipe code afterwards?

Thanks in advance!

There are multiple solutions for what you want. But since your corpus was created with a VectorSource, it is just one long input vector. You can easily turn this back into a vector, so udpipe can take over.

In the udpipe example documentation everything is defined as x, so I will do the same. After you have cleaned the corpus, just do the following:

x <- as.character(docs[1])

The [1] after docs is important, otherwise you get some unwanted extra characters. Once that is done, run the udpipe commands to turn the vector into the data.frame you need.

x <- udpipe_annotate(ud_model, x)
x <- as.data.frame(x)
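
These lines assume ud_model is an already loaded udpipe model. In case it is not, here is a minimal sketch of downloading and loading the Dutch model with the standard udpipe helpers (the language name "dutch" is an assumption, pick the model that matches your data):

library(udpipe)
# download the Dutch model once; this writes a .udpipe file to the working directory
dl <- udpipe_download_model(language = "dutch")
# load the model so it can be passed to udpipe_annotate()
ud_model <- udpipe_load_model(file = dl$file_model)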

Another option is to first write the corpus to disk (see ?writeCorpus for more info), then read the cleaned files back in and feed them through udpipe. That is more of a workaround, but it might give you a better workflow.
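
A minimal sketch of that workaround, assuming an output directory "cleaned" and that ud_model is already loaded (the directory and file names are just illustrations):

library(tm)
library(udpipe)
# write every cleaned document of the corpus to its own .txt file
dir.create("cleaned", showWarnings = FALSE)
writeCorpus(docs, path = "cleaned")
# read the cleaned files back in as a plain character vector
files <- list.files("cleaned", pattern = "\\.txt$", full.names = TRUE)
txt_clean <- vapply(files, function(f) paste(readLines(f), collapse = "\n"), character(1))
# annotate with udpipe, keeping the file name as doc_id
x <- udpipe_annotate(ud_model, x = txt_clean, doc_id = basename(files))
x <- as.data.frame(x)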

udpipe also handles punctuation: it is put in a special upos class called PUNCT, with an xpos description (in Dutch, if you use the Dutch model) such as Punc|komma or Punc|punt. If a noun has a capital letter, the lemma will be lowercased.
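
That means you can often skip the punctuation cleaning in tm and drop those tokens afterwards instead; a small sketch of what that could look like once x is a data.frame (the column names used are the standard udpipe output columns):

library(udpipe)
# drop punctuation tokens instead of removing punctuation beforehand
x_clean <- subset(x, upos != "PUNCT")
# see how the Dutch model describes punctuation in xpos (e.g. Punc|komma, Punc|punt)
table(subset(x, upos == "PUNCT")$xpos)
# frequencies based on lemmas, which udpipe already lowercases for nouns
stats <- txt_freq(x_clean$lemma)
head(stats)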

In your case I would just go through the data with basic regex options instead of using tm. The Dutch stopword list only removes some verbs like "zijn", "worden" and "kunnen", some adverbs like "te", and pronouns like "ik" and "we". You filter these out in your udpipe code anyway, since you only look at nouns and adjectives.
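
A minimal sketch of that approach, replacing the tm pipeline with plain gsub calls on the imported text before annotating (assuming ud_model is loaded as above; the column name "comment" is an assumption, use whatever column in your csv holds the free text):

library(readr)
library(udpipe)
txt <- read_delim(fileName, ";", escape_double = FALSE, trim_ws = TRUE)
# assumed: the free-text column is called "comment"
comments <- tolower(txt$comment)
# collapse the spelling variants with plain gsub instead of tm_map + content_transformer
comments <- gsub("afspraak|afspraken|afgesproken", "afspraak", comments)
comments <- gsub("communcatie|communiceren|communicatie|comminicatie|communiceer|comuniseren|comunuseren|communictatie|comminiceren|comminisarisacie|communcaite", "communicatie", comments)
comments <- gsub("contact|kontact|kontakt", "contact", comments)
# no need to strip punctuation, numbers or stopwords here:
# filtering on upos after annotation takes care of that
x <- udpipe_annotate(ud_model, x = comments)
x <- as.data.frame(x)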