Creating an AI chatbot, but getting a traceback error
I'm trying to create an AI chatbot in Python. I tried to follow this tutorial: https://techwithtim.net/tutorials/ai-chatbot/part-1/ but I'm running into a lot of deprecation warnings and a traceback error.
Here is the code:
import json
import random
import tensorflow
import tflearn
import numpy
import sys
import pickle
import nltk
from nltk.stem.lancaster import LancasterStemmer

stemmer = LancasterStemmer()
nltk.download('punkt')

with open("trainingData.json") as file:
    data = json.load(file)

try:
    with open("data.pickle", "rb") as f:
        words, labels, training, output = pickle.load(f)
except:
    words = []
    labels = []
    docs_x = []
    docs_y = []

    for intent in data["intents"]:
        for pattern in intent["patterns"]:
            wrds = nltk.word_tokenize(pattern)
            words.extend(wrds)
            docs_x.append(wrds)
            docs_y.append(intent["tag"])

        if intent["tag"] not in labels:
            labels.append(intent["tag"])

    words = [stemmer.stem(w.lower()) for w in words if w != "?"]
    words = sorted(list(set(words)))

    labels = sorted(labels)

    training = []
    output = []

    out_empty = [0 for _ in range(len(labels))]

    for x, doc in enumerate(docs_x):
        bag = []

        wrds = [stemmer.stem(w.lower()) for w in doc]

        for w in words:
            if w in wrds:
                bag.append(1)
            else:
                bag.append(0)

        output_row = out_empty[:]
        output_row[labels.index(docs_y[x])] = 1

        training.append(bag)
        output.append(output_row)

    training = numpy.array(training)
    output = numpy.array(output)

    with open("data.pickle", "wb") as f:
        pickle.dump((words, labels, training, output), f)

tensorflow.reset_default_graph()

net = tflearn.input_data(shape=[None, len(training[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
net = tflearn.regression(net)

model = tflearn.DNN(net)

try:
    model.load("model.tflearn")
except:
    model.fit(training, output, n_epoch=1000, batch_size=8, show_metric=True)
    model.save("model.tflearn")

def bag_of_words(s, words):
    bag = [0 for _ in range(len(words))]

    s_words = nltk.word_tokenize(s)
    s_words = [stemmer.stem(word.lower()) for word in s_words]

    for se in s_words:
        for i, w in enumerate(words):
            if w == se:
                bag[i] = 1

    return numpy.array(bag)

def chat():
    print("Start talking with the bot (type quit to stop)!")
    while True:
        inp = input("You: ")
        if inp.lower() == "quit":
            break

        results = model.predict([bag_of_words(inp, words)])
        results_index = numpy.argmax(results)
        tag = labels[results_index]

        for tg in data["intents"]:
            if tg['tag'] == tag:
                responses = tg['responses']

        print(random.choice(responses))

chat()
Here is the error I'm getting. How do I fix the deprecation warnings and this traceback error?
The error output is below:
Run id: VOB3W4
Log directory: /tmp/tflearn_logs/
---------------------------------
Training samples: 20
Validation samples: 0
--
--
Traceback (most recent call last):
  File "script.py", line 91, in <module>
    model.fit(training, output, n_epoch=1000, batch_size=8, show_metric=True)
  File "/usr/local/lib/python2.7/site-packages/tflearn/models/dnn.py", line 216, in fit
    callbacks=callbacks)
  File "/usr/local/lib/python2.7/site-packages/tflearn/helpers/trainer.py", line 339, in fit
    show_metric)
  File "/usr/local/lib/python2.7/site-packages/tflearn/helpers/trainer.py", line 816, in _train
    tflearn.is_training(True, session=self.session)
  File "/usr/local/lib/python2.7/site-packages/tflearn/config.py", line 95, in is_training
    tf.get_collection('is_training_ops')[0].eval(session=session)
  File "/usr/local/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 731, in eval
    return _eval_using_default_session(self, feed_dict, self.graph, session)
  File "/usr/local/lib/python2.7/site-packages/tensorflow/python/framework/ops.py", line 5579, in _eval_using_default_session
    return session.run(tensors, feed_dict)
  File "/usr/local/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 950, in run
    run_metadata_ptr)
  File "/usr/local/lib/python2.7/site-packages/tensorflow/python/client/session.py", line 1096, in _run
    raise RuntimeError('Attempted to use a closed Session.')
RuntimeError: Attempted to use a closed Session.
At the start the file "model.tflearn" doesn't exist yet, and when the code tries to load it, the try/except should catch the error and run fit() and save():
try:
    model.load("model.tflearn")
except:
    model.fit(training, output, n_epoch=1000, batch_size=8, show_metric=True)
    model.save("model.tflearn")
But it seems the failed load() closes the tf.Session(), so fit() can no longer run correctly.
If you remove the try/except with load() and keep only fit() and save(), then the model is created and saved to a file without any problem:
model.fit(training, output, n_epoch=1000, batch_size=8, show_metric=True)
model.save("model.tflearn")
Once the file "model.tflearn" has been created, you can use try/except with load() again, and it should work as long as you don't delete the model files.
A better solution is to check whether the file exists. However, the model is actually saved across several files, "model.tflearn.index", "model.tflearn.meta" and "model.tflearn.data-00000-of-00001", so you have to check for one of those instead of "model.tflearn". Use
import os

if os.path.exists("model.tflearn.meta"):
    model.load("model.tflearn")
else:
    model.fit(training, output, n_epoch=1000, batch_size=8, show_metric=True)
    model.save("model.tflearn")
instead of
try:
    model.load("model.tflearn")
except:
    model.fit(training, output, n_epoch=1000, batch_size=8, show_metric=True)
    model.save("model.tflearn")
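As for the deprecation warnings the question mentions: they come from tflearn still using the TensorFlow 1.x graph API. A minimal sketch for quieting the noise, assuming a TensorFlow build recent enough to ship the compat.v1 namespace (roughly 1.13 or newer), is shown below. It only hides the messages, it does not modernize tflearn itself:

import tensorflow as tf

# Assumption: tf.compat.v1 is available (TensorFlow ~1.13+).
# Lower the log level so deprecation notices are no longer printed:
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)

# Prefer the compat.v1 alias over the deprecated top-level call:
tf.compat.v1.reset_default_graph()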
EDIT: It seems this problem has existed for at least 2 years: RuntimeError: Attempted to use a closed Session in tflearn
Try this:
try:
    model.load("model3.tflearn")
except:
    model = tflearn.DNN(net)
    model.fit(training, output, n_epoch=1000, batch_size=8, show_metric=True)
    model.save("model3.tflearn")
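Putting the two ideas together (check for the checkpoint files instead of relying on the exception, and create the DNN right before training), the load/train section could look like the sketch below. It assumes the net, training and output variables already built earlier in the question's script; this is a workaround, not the tutorial's original code:

import os

# Build the model after the network definition, as in the question's script.
model = tflearn.DNN(net)

if os.path.exists("model.tflearn.meta"):
    # A checkpoint from a previous run exists, so just restore it.
    model.load("model.tflearn")
else:
    # No checkpoint yet: train from scratch and save it for the next run.
    model.fit(training, output, n_epoch=1000, batch_size=8, show_metric=True)
    model.save("model.tflearn")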