Python TextBlob 翻译问题
Python TextBlob translate issue
我正在使用 Python、TextBlob 和 NLTK 做一个快速情绪分析控制台应用程序。
目前我使用的是一个指向西班牙语维基文章的链接,所以我不需要翻译它,并且可以使用 NLTK 的西班牙语停用词列表;但是如果我想让这段代码也适用于其他语言的链接,该怎么办?
如果我使用 textFinal=TextBlob(texto)
下面的 TextFinal=TextFinal.translate(to="es")
行(下面的代码),我会收到错误消息,因为它无法将西班牙语翻译成西班牙语。
我可以通过使用 try/except 来防止这种情况吗?有没有办法让代码根据我提供给应用程序的链接所使用的语言,动态地翻译成不同的语言(并使用相应的停用词列表)?
"""Quick sentiment-analysis script: scrape a Spanish wiki page, strip
punctuation and Spanish stopwords, then score sentiment with TextBlob."""
import nltk
nltk.download('stopwords')
from nltk import word_tokenize
from nltk.corpus import stopwords
import string
from textblob import TextBlob, Word
import urllib.request
from bs4 import BeautifulSoup

# Fetch the page and extract its visible text.
response = urllib.request.urlopen('https://es.wikipedia.org/wiki/Valencia')
html = response.read()
soup = BeautifulSoup(html, 'html5lib')
text = soup.get_text(strip=True)

# Tokenize, lowercase, drop punctuation and non-alphabetic tokens.
tokens = word_tokenize(text)
tokens = [w.lower() for w in tokens]
table = str.maketrans('', '', string.punctuation)
stripped = [w.translate(table) for w in tokens]
words = [word for word in stripped if word.isalpha()]

# Remove Spanish stopwords.
stop_words = set(stopwords.words('spanish'))
words = [w for w in words if w not in stop_words]

# Persist the cleaned words; explicit UTF-8 so accented Spanish
# characters round-trip regardless of the platform's default encoding.
with open('palabras.txt', 'w', encoding='utf-8') as f:
    for word in words:
        f.write(" " + word)
with open('palabras.txt', 'r', encoding='utf-8') as myfile:
    texto = myfile.read().replace('\n', '')

textFinal = TextBlob(texto)
print(textFinal.sentiment)

# Plot the 20 most frequent words.
freq = nltk.FreqDist(words)
freq.plot(20, cumulative=False)
看看 langdetect 包。您可以检查您输入的页面的语言,如果页面语言与翻译语言匹配,则跳过翻译。类似于以下内容:
"""Sentiment analysis of a wiki page: detect the page language with
langdetect, skip translation when it already matches the target, and
pick the matching NLTK stopword list for whatever link is supplied."""
import string
import urllib.request

import nltk
from bs4 import BeautifulSoup
from langdetect import detect
from nltk import word_tokenize
from nltk.corpus import stopwords
from textblob import TextBlob, Word

nltk.download("stopwords")
nltk.download("punkt")  # required by word_tokenize; LookupError without it

# Fetch the page and extract its visible text.
response = urllib.request.urlopen("https://es.wikipedia.org/wiki/Valencia")
html = response.read()
soup = BeautifulSoup(html, "html5lib")
text = soup.get_text(strip=True)

# ISO-639-1 code reported by langdetect, e.g. "es", "en", "fr".
lang = detect(text)

# Tokenize, lowercase, drop punctuation and non-alphabetic tokens.
tokens = word_tokenize(text)
tokens = [w.lower() for w in tokens]
table = str.maketrans("", "", string.punctuation)
stripped = [w.translate(table) for w in tokens]
words = [word for word in stripped if word.isalpha()]

# Map the detected language code to an NLTK stopword corpus name so the
# right stopword list is used for the page's actual language; fall back
# to Spanish (the original behavior) for unmapped languages.
STOPWORD_LANGS = {
    "es": "spanish", "en": "english", "fr": "french",
    "de": "german", "it": "italian", "pt": "portuguese",
}
stop_words = set(stopwords.words(STOPWORD_LANGS.get(lang, "spanish")))
words = [w for w in words if w not in stop_words]

# Persist the cleaned words; explicit UTF-8 keeps accented characters intact.
with open("palabras.txt", "w", encoding="utf-8") as f:
    for word in words:
        f.write(" " + word)
with open("palabras.txt", "r", encoding="utf-8") as myfile:
    texto = myfile.read().replace("\n", "")

textFinal = TextBlob(texto)
translate_to = "es"
# Translating a language into itself raises an error in TextBlob, so
# only translate when the detected language differs from the target.
if lang != translate_to:
    textFinal = textFinal.translate(to=translate_to)
print(textFinal.sentiment)

# Plot the 20 most frequent words.
freq = nltk.FreqDist(words)
freq.plot(20, cumulative=False)
我正在使用 Python、TextBlob 和 NLTK 做一个快速情绪分析控制台应用程序。
目前我使用的是一个指向西班牙语维基文章的链接,所以我不需要翻译它,并且可以使用 NLTK 的西班牙语停用词列表;但是如果我想让这段代码也适用于其他语言的链接,该怎么办?
如果我使用 textFinal=TextBlob(texto)
下面的 TextFinal=TextFinal.translate(to="es")
行(下面的代码),我会收到错误消息,因为它无法将西班牙语翻译成西班牙语。
我可以通过使用 try/except 来防止这种情况吗?有没有办法让代码根据我提供给应用程序的链接所使用的语言,动态地翻译成不同的语言(并使用相应的停用词列表)?
"""Quick sentiment-analysis script: scrape a Spanish wiki page, strip
punctuation and Spanish stopwords, then score sentiment with TextBlob."""
import nltk
nltk.download('stopwords')
from nltk import word_tokenize
from nltk.corpus import stopwords
import string
from textblob import TextBlob, Word
import urllib.request
from bs4 import BeautifulSoup

# Fetch the page and extract its visible text.
response = urllib.request.urlopen('https://es.wikipedia.org/wiki/Valencia')
html = response.read()
soup = BeautifulSoup(html, 'html5lib')
text = soup.get_text(strip=True)

# Tokenize, lowercase, drop punctuation and non-alphabetic tokens.
tokens = word_tokenize(text)
tokens = [w.lower() for w in tokens]
table = str.maketrans('', '', string.punctuation)
stripped = [w.translate(table) for w in tokens]
words = [word for word in stripped if word.isalpha()]

# Remove Spanish stopwords.
stop_words = set(stopwords.words('spanish'))
words = [w for w in words if w not in stop_words]

# Persist the cleaned words; explicit UTF-8 so accented Spanish
# characters round-trip regardless of the platform's default encoding.
with open('palabras.txt', 'w', encoding='utf-8') as f:
    for word in words:
        f.write(" " + word)
with open('palabras.txt', 'r', encoding='utf-8') as myfile:
    texto = myfile.read().replace('\n', '')

textFinal = TextBlob(texto)
print(textFinal.sentiment)

# Plot the 20 most frequent words.
freq = nltk.FreqDist(words)
freq.plot(20, cumulative=False)
看看 langdetect 包。您可以检查您输入的页面的语言,如果页面语言与翻译语言匹配,则跳过翻译。类似于以下内容:
"""Sentiment analysis of a wiki page: detect the page language with
langdetect, skip translation when it already matches the target, and
pick the matching NLTK stopword list for whatever link is supplied."""
import string
import urllib.request

import nltk
from bs4 import BeautifulSoup
from langdetect import detect
from nltk import word_tokenize
from nltk.corpus import stopwords
from textblob import TextBlob, Word

nltk.download("stopwords")
nltk.download("punkt")  # required by word_tokenize; LookupError without it

# Fetch the page and extract its visible text.
response = urllib.request.urlopen("https://es.wikipedia.org/wiki/Valencia")
html = response.read()
soup = BeautifulSoup(html, "html5lib")
text = soup.get_text(strip=True)

# ISO-639-1 code reported by langdetect, e.g. "es", "en", "fr".
lang = detect(text)

# Tokenize, lowercase, drop punctuation and non-alphabetic tokens.
tokens = word_tokenize(text)
tokens = [w.lower() for w in tokens]
table = str.maketrans("", "", string.punctuation)
stripped = [w.translate(table) for w in tokens]
words = [word for word in stripped if word.isalpha()]

# Map the detected language code to an NLTK stopword corpus name so the
# right stopword list is used for the page's actual language; fall back
# to Spanish (the original behavior) for unmapped languages.
STOPWORD_LANGS = {
    "es": "spanish", "en": "english", "fr": "french",
    "de": "german", "it": "italian", "pt": "portuguese",
}
stop_words = set(stopwords.words(STOPWORD_LANGS.get(lang, "spanish")))
words = [w for w in words if w not in stop_words]

# Persist the cleaned words; explicit UTF-8 keeps accented characters intact.
with open("palabras.txt", "w", encoding="utf-8") as f:
    for word in words:
        f.write(" " + word)
with open("palabras.txt", "r", encoding="utf-8") as myfile:
    texto = myfile.read().replace("\n", "")

textFinal = TextBlob(texto)
translate_to = "es"
# Translating a language into itself raises an error in TextBlob, so
# only translate when the detected language differs from the target.
if lang != translate_to:
    textFinal = textFinal.translate(to=translate_to)
print(textFinal.sentiment)

# Plot the 20 most frequent words.
freq = nltk.FreqDist(words)
freq.plot(20, cumulative=False)