AttributeError: 'str' object has no attribute 'ndim' [Python | Keras]
AttributeError: 'str' object has no attribute 'ndim' [Python | Keras]
下面是我的代码,与此处找到的代码基本相同:https://keras.io/examples/generative/lstm_character_level_text_generation/
它已经在所有时期工作了一天,然而,今天它运行但在随机时期出现错误,AttributeError 错误表明字符串没有 ndim 属性,这对于输入的数据没有意义并从第 51-56 行转换为 numpy 数组与之前工作时相同,那么它如何将此数据更改为字符串?在没有篡改输入数据或接收数据的代码的情况下,这在一天中发生了怎样的变化。
def load_file(self, filename):
file = open(filename, 'r')
content = file.read()
file.close()
return content
def sample(self, preds, temperature=1.0):
preds = np.asarray(preds).astype("float64")
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
def train(self, epochs, batch_size):
content = self.load_file("data/ABC_cleaned/input.txt")
chars = sorted(list(set(content)))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
maxlen = 40
step = 3
sentences = []
next_chars = []
for i in range(0, len(content) - maxlen, step):
sentences.append(content[i:i+maxlen])
next_chars.append(content[i+maxlen])
x = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool)
y = np.zeros((len(sentences), len(chars)), dtype=np.bool)
for i, sentence in enumerate(sentences):
for t, char in enumerate(sentence):
x[i, t, char_indices[char]] = 1
y[i, char_indices[next_chars[i]]] = 1
print(type(x))
model = keras.Sequential()
model.add(input_layer.InputLayer(input_shape=(maxlen, len(chars))))
model.add(layers.LSTM(128))
model.add(layers.Dense(len(chars), activation='softmax'))
optimizer = optimizers.RMSprop(lr=0.01)
model.compile(loss="categorical_crossentropy", optimizer=optimizer)
for epoch in range(epochs):
model.fit(x, y, batch_size=batch_size, epochs=1)
print()
print("Generating text after epoch %d" % epoch)
start_index = np.random.randint(0, len(content) -maxlen - 1)
for diversity in [0.2, 0.5, 1.0, 1.2]:
print("...Diversity:", diversity)
generated = ""
sentence = content[start_index:start_index+maxlen]
print('...Generating with seed: "' + sentence + '"')
for i in range(400):
x_pred = np.zeros((1, maxlen, len(chars)))
for t, char in enumerate(sentence):
x_pred[0, t, char_indices[char]] = 1.0
preds = model.predict(x_pred, verbose=0)[0]
next_index = self.sample(preds, diversity)
next_char = indices_char[next_index]
sentence = sentence[1:] + next_char
generated += next_char
print("...Generated: ", generated)
print()
topSeven = []
contentSong = []
fullAbc = ""
count = 0
if "X:" in generated:
index = generated.find("X:")
generated = generated[index:]
genList = generated.split('\n')
for line in genList:
if count > 6:
if line and generated[count+1]:
contentSong.append(line)
else:
contentSong.append(line)
break
if line.startswith(("X:", "T:", "%", "S:", "M:", "L:", "K:")):
topSeven.append(line)
count+=1
if len(topSeven) == 7:
for x in topSeven:
fullAbc += x + "\n"
for x in contentSong:
fullAbc += x + "\n"
with open("good_reels.txt", 'a') as f:
f.write("\n" + fullAbc)
f.close()
break
您在此代码中声明了两次 x
。先到这里
x = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool)
y = np.zeros((len(sentences), len(chars)), dtype=np.bool)
这里是第二个:
if len(topSeven) == 7:
for x in topSeven:
fullAbc += x + "\n"
for x in contentSong:
fullAbc += x + "\n"
with open("good_reels.txt", 'a') as f:
f.write("\n" + fullAbc)
f.close()
break
在第一个循环迭代中,x
确实是一个 numpy.ndarray
,它将按预期工作。当它到达第二个声明时,x
现在是 str
,它也会按预期工作。
在第二次循环迭代中,x
当前是 str
,而它期望的是 numpy.ndarray
,它会给出错误。
要修复它,只需将 x
的第二个声明重命名为 c
,例如,甚至删除声明它的循环:
if len(topSeven) == 7:
fullAbc += '\n'.join(topSeven)
fullAbc += '\n'.join(contentSong)
with open("good_reels.txt", 'a') as f:
f.write("\n" + fullAbc)
f.close()
break
下面是我的代码,与此处找到的代码基本相同:https://keras.io/examples/generative/lstm_character_level_text_generation/
它已经在所有时期工作了一天,然而,今天它运行但在随机时期出现错误,AttributeError 错误表明字符串没有 ndim 属性,这对于输入的数据没有意义并从第 51-56 行转换为 numpy 数组与之前工作时相同,那么它如何将此数据更改为字符串?在没有篡改输入数据或接收数据的代码的情况下,这在一天中发生了怎样的变化。
def load_file(self, filename):
file = open(filename, 'r')
content = file.read()
file.close()
return content
def sample(self, preds, temperature=1.0):
preds = np.asarray(preds).astype("float64")
preds = np.log(preds) / temperature
exp_preds = np.exp(preds)
preds = exp_preds / np.sum(exp_preds)
probas = np.random.multinomial(1, preds, 1)
return np.argmax(probas)
def train(self, epochs, batch_size):
content = self.load_file("data/ABC_cleaned/input.txt")
chars = sorted(list(set(content)))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
maxlen = 40
step = 3
sentences = []
next_chars = []
for i in range(0, len(content) - maxlen, step):
sentences.append(content[i:i+maxlen])
next_chars.append(content[i+maxlen])
x = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool)
y = np.zeros((len(sentences), len(chars)), dtype=np.bool)
for i, sentence in enumerate(sentences):
for t, char in enumerate(sentence):
x[i, t, char_indices[char]] = 1
y[i, char_indices[next_chars[i]]] = 1
print(type(x))
model = keras.Sequential()
model.add(input_layer.InputLayer(input_shape=(maxlen, len(chars))))
model.add(layers.LSTM(128))
model.add(layers.Dense(len(chars), activation='softmax'))
optimizer = optimizers.RMSprop(lr=0.01)
model.compile(loss="categorical_crossentropy", optimizer=optimizer)
for epoch in range(epochs):
model.fit(x, y, batch_size=batch_size, epochs=1)
print()
print("Generating text after epoch %d" % epoch)
start_index = np.random.randint(0, len(content) -maxlen - 1)
for diversity in [0.2, 0.5, 1.0, 1.2]:
print("...Diversity:", diversity)
generated = ""
sentence = content[start_index:start_index+maxlen]
print('...Generating with seed: "' + sentence + '"')
for i in range(400):
x_pred = np.zeros((1, maxlen, len(chars)))
for t, char in enumerate(sentence):
x_pred[0, t, char_indices[char]] = 1.0
preds = model.predict(x_pred, verbose=0)[0]
next_index = self.sample(preds, diversity)
next_char = indices_char[next_index]
sentence = sentence[1:] + next_char
generated += next_char
print("...Generated: ", generated)
print()
topSeven = []
contentSong = []
fullAbc = ""
count = 0
if "X:" in generated:
index = generated.find("X:")
generated = generated[index:]
genList = generated.split('\n')
for line in genList:
if count > 6:
if line and generated[count+1]:
contentSong.append(line)
else:
contentSong.append(line)
break
if line.startswith(("X:", "T:", "%", "S:", "M:", "L:", "K:")):
topSeven.append(line)
count+=1
if len(topSeven) == 7:
for x in topSeven:
fullAbc += x + "\n"
for x in contentSong:
fullAbc += x + "\n"
with open("good_reels.txt", 'a') as f:
f.write("\n" + fullAbc)
f.close()
break
您在此代码中声明了两次 x
。先到这里
x = np.zeros((len(sentences), maxlen, len(chars)), dtype=np.bool)
y = np.zeros((len(sentences), len(chars)), dtype=np.bool)
这里是第二个:
if len(topSeven) == 7:
for x in topSeven:
fullAbc += x + "\n"
for x in contentSong:
fullAbc += x + "\n"
with open("good_reels.txt", 'a') as f:
f.write("\n" + fullAbc)
f.close()
break
在第一个循环迭代中,x
确实是一个 numpy.ndarray
,它将按预期工作。当它到达第二个声明时,x
现在是 str
,它也会按预期工作。
在第二次循环迭代中,x
当前是 str
,而它期望的是 numpy.ndarray
,它会给出错误。
要修复它,只需将 x
的第二个声明重命名为 c
,例如,甚至删除声明它的循环:
if len(topSeven) == 7:
fullAbc += '\n'.join(topSeven)
fullAbc += '\n'.join(contentSong)
with open("good_reels.txt", 'a') as f:
f.write("\n" + fullAbc)
f.close()
break