Multiple n-grams in a transition matrix: probabilities not adding up to 1
I'm trying to find a way to build a transition matrix for a given text using unigrams, bigrams, and trigrams with Python and numpy. The probabilities in each row should add up to 1. I did this with bigrams first, and it works well:
distinct_words = list(word_dict.keys())
dwc = len(distinct_words)
matrix = np.zeros((dwc, dwc), dtype=float)  # np.float is removed in recent NumPy; plain float works

for i in range(len(distinct_words)):
    word = distinct_words[i]
    first_word_idx = i
    total = 0
    # total count of bigrams that start with this word
    for bigram, count in ngrams.items():
        word_1, word_2 = bigram.split(" ")
        if word_1 == word:
            total += count
    # fill the row with the conditional probabilities count / total
    for bigram, count in ngrams.items():
        word_1, word_2 = bigram.split(" ")
        if word_1 == word:
            second_word_idx = index_dict[word_2]
            matrix[first_word_idx, second_word_idx] = count / total
But now I want to add unigrams and trigrams and weight their probabilities (trigrams * .6, bigrams * .2, unigrams * .2). I don't think my Python is very concise, which is one problem, but I also don't know how to use multiple n-grams (and weights, though honestly the weights are secondary) so that all the probabilities in any given row still add up to 1.
distinct_words = list(word_dict.keys())
dwc = len(distinct_words)
matrix = np.zeros((dwc, dwc), dtype=float)

for i in range(len(distinct_words)):
    word = distinct_words[i]
    first_word_index = i
    bi_total = 0
    tri_total = 0
    tri_prob = 0
    bi_prob = 0
    uni_prob = word_dict[word] / len(distinct_words)
    if i < len(distinct_words) - 1:
        for trigram, count in trigrams.items():
            word_1, word_2, word_3 = trigram.split()
            if word_1 + word_2 == word + distinct_words[i + 1]:
                tri_total += count
        for trigram, count in trigrams.items():
            word_1, word_2, word_3 = trigram.split()
            if word_1 + word_2 == word + distinct_words[i + 1]:
                second_word_index = index_dict[word_2]
                tri_prob = count / bigrams[word_1 + " " + word_2]
    for bigram, count in bigrams.items():
        word_1, word_2 = bigram.split(" ")
        if word_1 == word:
            bi_total += count
    for bigram, count in bigrams.items():
        word_1, word_2 = bigram.split(" ")
        if word_1 == word:
            second_word_index = index_dict[word_2]
            bi_prob = count / bi_total
            matrix[first_word_index, second_word_index] = (tri_prob * .4) + (bi_prob * .2) + (word_dict[word] / len(word_dict) * .2)
I was reading this lecture on how to set up my probability matrix, and it seems to make sense, but I'm not sure where I'm going wrong.
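One thing worth spelling out: an interpolated row can only sum to 1 if the interpolation weights themselves sum to 1 and each component is a proper conditional distribution over the next word (the .4 + .2 + .2 in the matrix assignment above only reaches .8). A minimal sketch of that identity, with made-up toy numbers rather than real counts:

# Toy sketch (made-up numbers): a weighted mix of proper distributions over the
# next word sums to 1 exactly when the weights themselves sum to 1.
lambdas = [0.2, 0.2, 0.6]                      # unigram, bigram, trigram weights

uni = {"a": 0.5, "b": 0.3, "c": 0.2}           # P(next)
bi = {"a": 0.1, "b": 0.7, "c": 0.2}            # P(next | previous word)
tri = {"a": 0.0, "b": 0.9, "c": 0.1}           # P(next | two previous words)

row = {w: lambdas[0] * uni[w] + lambdas[1] * bi[w] + lambdas[2] * tri[w]
       for w in uni}
print(sum(row.values()))                       # ~1.0 because 0.2 + 0.2 + 0.6 == 1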
If it helps, my n_grams come from this - it just produces a dictionary of each n_gram as a string and its count.
def get_ngram(words, n):
    word_dict = {}
    for i, word in enumerate(words):
        if i > (n - 2):
            n_gram = []
            # collect the current word and the n-1 words before it
            for num in range(n):
                index = i - num
                n_gram.append(words[index])
            if len(n_gram) > 1:
                formatted_gram = ""
                for word in reversed(n_gram):
                    formatted_gram += word + " "
            else:
                formatted_gram = n_gram[0]
            stripped = formatted_gram.strip() if formatted_gram else n_gram[0]
            if stripped in word_dict:
                word_dict[stripped] += 1
            else:
                word_dict[stripped] = 1
    return word_dict
I have implemented an example for calculating unigrams, bigrams, and trigrams. You can join the items easily with zip. Also, Counter is used to count the items and defaultdict holds the items' probabilities. defaultdict matters when a key is not in the collection: it returns zero. Otherwise you would have to add if clauses to handle missing keys.
from collections import Counter, defaultdict


def calculate_grams(items_list):
    # count items in list
    counts = Counter()
    for item in items_list:
        counts[item] += 1
    # calculate probabilities, defaultdict returns 0 if not found
    prob = defaultdict(float)
    for item, count in counts.most_common():
        prob[item] = count / len(items_list)
    return prob


def calculate_bigrams(words):
    # tuple first and second items
    return calculate_grams(list(zip(words, words[1:])))


def calculate_trigrams(words):
    # tuple first, second and third items
    return calculate_grams(list(zip(words, words[1:], words[2:])))


dataset = ['a', 'b', 'b', 'c', 'a', 'a', 'a', 'b', 'e', 'e', 'c']

# create dictionary
dictionary = set(dataset)
print("Dictionary", dictionary)

unigrams = calculate_grams(dataset)
print("Unigrams", unigrams)

bigrams = calculate_bigrams(dataset)
print("Bigrams", bigrams)

trigrams = calculate_trigrams(dataset)
print("Trigrams", trigrams)

# Testing
test_words = ['a', 'b']
print("Testing", test_words)
for c in dictionary:
    # calculate each probability
    unigram_prob = unigrams[c]
    bigram_prob = bigrams[(test_words[-1], c)]
    trigram_prob = trigrams[(test_words[-2], test_words[-1], c)]
    # calculate total probability
    prob = .2 * unigram_prob + .2 * bigram_prob + .4 * trigram_prob
    print(c, prob)
Output:
Unigrams defaultdict(<class 'float'>, {'a': 0.36363636363636365, 'b': 0.2727272727272727, 'c': 0.18181818181818182, 'e': 0.18181818181818182})
Bigrams defaultdict(<class 'float'>, {('a', 'b'): 0.2, ('a', 'a'): 0.2, ('b', 'b'): 0.1, ('b', 'c'): 0.1, ('c', 'a'): 0.1, ('b', 'e'): 0.1, ('e', 'e'): 0.1, ('e', 'c'): 0.1})
Trigrams defaultdict(<class 'float'>, {('a', 'b', 'b'): 0.1111111111111111, ('b', 'b', 'c'): 0.1111111111111111, ('b', 'c', 'a'): 0.1111111111111111, ('c', 'a', 'a'): 0.1111111111111111, ('a', 'a', 'a'): 0.1111111111111111, ('a', 'a', 'b'): 0.1111111111111111, ('a', 'b', 'e'): 0.1111111111111111, ('b', 'e', 'e'): 0.1111111111111111, ('e', 'e', 'c'): 0.1111111111111111})
Testing ['a', 'b']
e 0.10080808080808081
b 0.11898989898989899
c 0.05636363636363637
a 0.07272727272727274
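To turn this into one row of the transition matrix from the question that actually sums to 1, one option (my addition, not part of the snippet above) is to score every word in dictionary the same way and then renormalize the row, which also absorbs the mass lost to unseen bigrams/trigrams and to the weights summing to .8:

import numpy as np

words = sorted(dictionary)                     # fix a column order for the row
index = {w: i for i, w in enumerate(words)}

row = np.zeros(len(words))
for c in words:
    row[index[c]] = (.2 * unigrams[c]
                     + .2 * bigrams[(test_words[-1], c)]
                     + .4 * trigrams[(test_words[-2], test_words[-1], c)])

row /= row.sum()                               # renormalize so the row sums to 1
print(row, row.sum())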
Let's try to do this in the most efficient way in pure Python, relying only on list and dict comprehensions.
Suppose we have a toy text made up of the 3 words "a", "b", and "c":
import numpy as np

np.random.seed(42)
text = " ".join([np.random.choice(list("abc")) for _ in range(100)])
text
'c a c c a a c b c c c c a c b a b b b b a a b b a a a c c c b c b b c
b c c a c a c c a a c b a b b b a b a b c c a c c b a b b b b b b b a
c b b b b b b c c b c a b a a b c a b a a a a c a a a c a a'
Then to generate unigrams, bigrams, and trigrams you can proceed as follows:
unigrams = text.split()

unigram_counts = dict()
for unigram in unigrams:
    unigram_counts[unigram] = unigram_counts.get(unigram, 0) + 1

bigrams = ["".join(bigram) for bigram in zip(unigrams[:-1], unigrams[1:])]
bigram_counts = dict()
for bigram in bigrams:
    bigram_counts[bigram] = bigram_counts.get(bigram, 0) + 1

trigrams = ["".join(trigram) for trigram in zip(unigrams[:-2], unigrams[1:-1], unigrams[2:])]
trigram_counts = dict()
for trigram in trigrams:
    trigram_counts[trigram] = trigram_counts.get(trigram, 0) + 1
Combine the weights and normalize:
weights = [.2, .2, .6]
dics = [unigram_counts, bigram_counts, trigram_counts]
weighted_counts = {k: v * w for d, w in zip(dics, weights) for k, v in d.items()}

# desired output
freqs = {k: v / sum(weighted_counts.values()) for k, v in weighted_counts.items()}
What we get:
from pprint import pprint

pprint(freqs)
{'a': 0.06693711967545637,
'aa': 0.02434077079107505,
'aaa': 0.024340770791075043,
...
Finally, a sanity check:
print(sum(freqs.values()))
0.999999999999999
This code can be customized further to incorporate your tokenization rules, for example, or shortened by looping over the different grams in one pass.
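Note that freqs above is a single distribution over all of the grams together, not the row-stochastic matrix from the question. If each row (one row per preceding word) should sum to 1, you can interpolate per row and normalize there instead. A sketch along those lines, reusing unigrams, unigram_counts, and bigram_counts from above (the .3/.7 weights are just illustrative, and trigrams are left out because their rows would be indexed by word pairs):

import numpy as np

vocab = sorted(set(unigrams))                  # unigrams is the token list built above
n_tokens = len(unigrams)

# unigram distribution P(next)
p_uni = {w: c / n_tokens for w, c in unigram_counts.items()}

matrix = np.zeros((len(vocab), len(vocab)))
for i, w1 in enumerate(vocab):
    # total count of bigrams starting with w1 (keys are concatenated 1-char tokens)
    row_total = sum(c for big, c in bigram_counts.items() if big[0] == w1)
    for j, w2 in enumerate(vocab):
        if row_total:
            p_bi = bigram_counts.get(w1 + w2, 0) / row_total   # P(next | w1)
            matrix[i, j] = .3 * p_uni[w2] + .7 * p_bi          # weights sum to 1
        else:
            matrix[i, j] = p_uni[w2]                           # back off for unseen rows

print(matrix.sum(axis=1))                      # every row sums to (about) 1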