How to fix the python chatbot code problem 'Attempted to use a closed Session.'

I am following Tech With Tim's tutorial to build a simple chatbot in Python. I was able to get it mostly working, since there were only a few small code issues, but it still gives me the error below and I can't figure out how to fix it. Can someone help me with this?

Error

ValueError                                Traceback (most recent call last)
<ipython-input-35-0fc2c0bcffad> in <module>()
     78 try:
---> 79     model.load("model.tflearn")
     80 except:

11 frames
ValueError: The passed save_path is not a valid checkpoint: /content/model.tflearn

During handling of the above exception, another exception occurred:

RuntimeError                              Traceback (most recent call last)
/usr/local/lib/python3.7/dist-packages/tensorflow/python/client/session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
   1112     # Check session.
   1113     if self._closed:
-> 1114       raise RuntimeError('Attempted to use a closed Session.')
   1115     if self.graph.version == 0:
   1116       raise RuntimeError('The Session graph is empty.  Add operations to the '

RuntimeError: Attempted to use a closed Session.

intents.json

{"intents": [
        {"tag": "greeting",
         "patterns": ["Hi", "How are you", "Is anyone there?", "Hello", "Good day", "Whats up"],
         "responses": ["Hello!", "Good to see you again!", "Hi there, how can I help?"],
         "context_set": ""
        },
        {"tag": "goodbye",
         "patterns": ["cya", "See you later", "Goodbye", "I am Leaving", "Have a Good day"],
         "responses": ["Sad to see you go :(", "Talk to you later", "Goodbye!"],
         "context_set": ""
        },
        {"tag": "age",
         "patterns": ["how old", "how old is tim", "what is your age", "how old are you", "age?"],
         "responses": ["I am 18 years old!", "18 years young!"],
         "context_set": ""
        },
        {"tag": "name",
         "patterns": ["what is your name", "what should I call you", "whats your name?"],
         "responses": ["You can call me Tim.", "I'm Tim!", "I'm Tim aka Tech With Tim."],
         "context_set": ""
        },
        {"tag": "shop",
         "patterns": ["Id like to buy something", "whats on the menu", "what do you reccommend?", "could i get something to eat"],
         "responses": ["We sell chocolate chip cookies for !", "Cookies are on the menu!"],
         "context_set": ""
        },
        {"tag": "hours",
         "patterns": ["when are you guys open", "what are your hours", "hours of operation"],
         "responses": ["We are open 7am-4pm Monday-Friday!"],
         "context_set": ""
        }
   ]
}

Code

import nltk
from nltk.stem.lancaster import LancasterStemmer
stemmer = LancasterStemmer()

import numpy
import tflearn
import tensorflow as tf
import random
import json
import pickle
nltk.download('punkt')

with open('/content/sample_data/intents.json') as file:
    data = json.load(file)

try:
    with open("data.pickle", "rb") as f:
        words, labels, training, output = pickle.load(f)
except:
    words = []
    labels = []
    docs_x = []
    docs_y = []

    for intent in data["intents"]:
        for pattern in intent["patterns"]:
            wrds = nltk.word_tokenize(pattern)
            words.extend(wrds)
            docs_x.append(wrds)
            docs_y.append(intent["tag"])

        if intent["tag"] not in labels:
            labels.append(intent["tag"])

    words = [stemmer.stem(w.lower()) for w in words if w != "?"]
    words = sorted(list(set(words)))

    labels = sorted(labels)

    training = []
    output = []

    out_empty = [0 for _ in range(len(labels))]

    for x, doc in enumerate(docs_x):
        bag = []

        wrds = [stemmer.stem(w.lower()) for w in doc]

        for w in words:
            if w in wrds:
                bag.append(1)
            else:
                bag.append(0)

        output_row = out_empty[:]
        output_row[labels.index(docs_y[x])] = 1

        training.append(bag)
        output.append(output_row)


    training = numpy.array(training)
    output = numpy.array(output)

    with open("data.pickle", "wb") as f:
        pickle.dump((words, labels, training, output), f)

tf.compat.v1.reset_default_graph()

net = tflearn.input_data(shape=[None, len(training[0])])
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, 8)
net = tflearn.fully_connected(net, len(output[0]), activation="softmax")
net = tflearn.regression(net)

model = tflearn.DNN(net)

try:
    model.load("model.tflearn")
except:
    model.fit(training, output, n_epoch=1000, batch_size=8, show_metric=True)
    model.save("model.tflearn")


def bag_of_words(s, words):
    bag = [0 for _ in range(len(words))]

    s_words = nltk.word_tokenize(s)
    s_words = [stemmer.stem(word.lower()) for word in s_words]

    for se in s_words:
        for i, w in enumerate(words):
            if w == se:
                bag[i] = 1
            
    return numpy.array(bag)


def chat():
    print("Start talking with the bot (type quit to stop)!")
    while True:
        inp = input("You: ")
        if inp.lower() == "quit":
            break

        results = model.predict([bag_of_words(inp, words)])
        results_index = numpy.argmax(results)
        tag = labels[results_index]

        if results[results_index] >0.7:
          for tg in data["intents"]:
            if tg['tag'] == tag:
                responses = tg['responses']
            print(random.choice(responses))
        
        else:
          print ('I did not get you')
chat()

Why does this happen? Follow the link:

self.trainer.restore(model_file, weights_only, **optargs)
self.session = self.trainer.session

When self.trainer.restore raises an exception, self.trainer.session is never assigned to the DNN's session; the DNN's session has already been closed before the exception occurs, so every later call hits the closed session.
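
To make the failure concrete, here is a rough pseudocode sketch of that pattern (this is not tflearn's actual source, just an illustration of the two lines quoted above):

# Pseudocode sketch of the failure mode, not tflearn's real code
class DNN:
    def load(self, model_file):
        # restore() closes the model's current session before trying to
        # restore the checkpoint; an invalid checkpoint makes it raise.
        self.trainer.restore(model_file)      # raises ValueError here
        self.session = self.trainer.session   # never reached, so self.session
                                              # still points at the closed session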

Add the line model = tflearn.DNN(net) inside the except block that runs when model.load() fails, so a fresh model with a new, open session is created:

try:
    model.load("model.tflearn")
except:
    # rebuild the DNN so it gets a new, open session instead of the closed one
    model = tflearn.DNN(net)
    model.fit(training, output, n_epoch=1000, batch_size=8, show_metric=True)
    model.save("model.tflearn")
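
If you would rather not rely on the exception at all, a minimal alternative sketch (assuming the checkpoint is saved next to the notebook, so the saver writes an index file named model.tflearn.index) is to call load() only when a checkpoint actually exists:

import os

# Restore only when a checkpoint is present; otherwise train and save one.
# Assumption: the saver writes "model.tflearn.index" alongside the other checkpoint files.
if os.path.exists("model.tflearn.index"):
    model.load("model.tflearn")
else:
    model.fit(training, output, n_epoch=1000, batch_size=8, show_metric=True)
    model.save("model.tflearn")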

Also, results is a 2-D list (one row per input), so the correct code should look like this:

if results[0][results_index] >0.7:
    for tg in data["intents"]:
        if tg['tag'] == tag:
            responses = tg['responses']
            print(random.choice(responses))
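
For clarity, here is a small sketch of the shapes involved (the probability values are illustrative only, and confidence is just a throwaway name for this example):

results = model.predict([bag_of_words(inp, words)])  # 2-D: one row per input, e.g. [[0.05, 0.90, ...]]
results_index = numpy.argmax(results)                # argmax over the flattened array -> best tag index
tag = labels[results_index]
confidence = results[0][results_index]               # index into the single row to get its probability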