How can I improve my neural network accuracy? (Speaker Recognition - MFCC)
I am working on a speaker-recognition neural network.
What I do is take a wav file [the first episode of The Big Bang Theory :-)], convert it to MFCC coefficients, and feed those as inputs to an open-source neural network API (sklearn's MLPClassifier). For the outputs I define a unique one-hot vector for each speaker (say, [1,0,0,0] - Sheldon; [0,1,0,0] - Penny; and so on), and I take 50 random samples for testing and use the rest for fitting (training).
This is my code. At first I got random-level accuracy from the NN, but with the help of some amazing people I improved it to ~42%. I want more, though :) around 70%:
from sklearn.neural_network import MLPClassifier
import python_speech_features
import scipy.io.wavfile as wav
import numpy as np
from os import listdir
from os.path import isfile, join
from random import shuffle
import matplotlib.pyplot as plt
from tqdm import tqdm
from random import randint

winner = []  # counts how many correct guesses ("bingos") we get when testing the NN
random_winner = []
win_len = 0.04  # window length in seconds
step = win_len / 2
nfft = 2048
for TestNum in tqdm(range(20)):  # every round we build the NN from X, Y and hold out 50 samples to test it
    X = []
    Y = []
    onlyfiles = [f for f in listdir("FinalAudios/") if isfile(join("FinalAudios/", f))]  # files in dir
    names = []  # names of the speakers
    for file in onlyfiles:  # extract the speaker name from each wav filename
        if " " not in file.split("_")[0]:
            names.append(file.split("_")[0])
        else:
            names.append(file.split("_")[0].split(" ")[0])
    only_speakers = [] + names  # per-file speaker list (with duplicates)
    names = list(dict.fromkeys(names))  # deduplicated speaker names
    print(names)
    vector_names = []  # one-hot output vector for each name
    i = 0
    vector_for_each_name = [0] * len(names)
    for name in names:
        vector_for_each_name[i] += 1
        vector_names.append(np.array(vector_for_each_name))
        vector_for_each_name[i] -= 1
        i += 1
    for f in onlyfiles:
        if " " not in f.split("_")[0]:
            f_speaker = f.split("_")[0]
        else:
            f_speaker = f.split("_")[0].split(" ")[0]
        fs, audio = wav.read("FinalAudios/" + f)  # read the file
        try:
            mfcc_feat = python_speech_features.mfcc(audio, samplerate=fs, winlen=win_len,
                                                    winstep=step, nfft=nfft, appendEnergy=False)
            flat_list = [item for sublist in mfcc_feat for item in sublist]  # flatten the MFCC matrix
            X.append(np.array(flat_list))
            Y.append(np.array(vector_names[names.index(f_speaker)]))
        except IndexError:
            pass
    Z = list(zip(X, Y))
    shuffle(Z)  # shuffle X and Y together so the test split is random
    X, Y = zip(*Z)
    X = list(X)
    Y = list(Y)
    X = np.asarray(X)
    Y = np.asarray(Y)
    Y_test = Y[:50]  # choose 50 samples for test, the rest for training
    X_test = X[:50]
    X = X[50:]
    Y = Y[50:]
    print(len(X))
    clf = MLPClassifier(solver='lbfgs', alpha=3e-2, hidden_layer_sizes=(50, 20), random_state=2)  # create the NN
    clf.fit(X, Y)  # train it
    print(list(clf.predict_proba([X[0]])[0]))
    print(list(Y_test[0]))
    for sample in range(len(X_test)):  # append 1 to winner on a correct prediction, 0 otherwise; plotted at the end
        arr = list(clf.predict([X_test[sample]])[0])
        if arr.index(max(arr)) == list(Y_test[sample]).index(1):
            winner.append(1)
        else:
            winner.append(0)
        if only_speakers[randint(0, len(only_speakers) - 1)] == only_speakers[randint(0, len(only_speakers) - 1)]:
            random_winner.append(1)
        else:
            random_winner.append(0)

# plot the running success rate of the classifier vs. random guessing
plot_x = []
plot_y = []
for i in range(1, len(winner)):
    plot_y.append(sum(winner[0:i]) * 1.0 / len(winner[0:i]))
    plot_x.append(i)
plot_random_x = []
plot_random_y = []
for i in range(1, len(random_winner)):
    plot_random_y.append(sum(random_winner[0:i]) * 1.0 / len(random_winner[0:i]))
    plot_random_x.append(i)
plt.plot(plot_x, plot_y, 'r', label='machine learning')
plt.plot(plot_random_x, plot_random_y, 'b', label='random')
plt.xlabel('Number Of Samples')  # naming the x axis
plt.ylabel('Success Rate')  # naming the y axis
plt.title('Success Rate: Random Vs ML!')  # title of the graph
plt.legend()  # show the line labels
plt.show()  # show the plot
Here is a zip file with my code and the audio files: https://ufile.io/eggjm1gw
Does anyone know how I can improve my accuracy?
Edit:
I improved my dataset and switched to a convolutional model, which got me to 60% accuracy. That is okay, but still not great.
import python_speech_features
import scipy.io.wavfile as wav
import numpy as np
from os import listdir
import os
import shutil
from os.path import isfile, join
from random import shuffle
from matplotlib import pyplot
from tqdm import tqdm
from random import randint
import tensorflow as tf
from tempfile import TemporaryFile

# previous settings:
# win_len = 0.04  # in seconds
# step = win_len / 2
# nfft = 2048
win_len = 0.05  # window length in seconds
step = win_len
nfft = 16384
results = []
outfile_x = None
outfile_y = None
winner = []
for TestNum in tqdm(range(40)):  # we repeat the experiment several times
    if not outfile_x:  # first round: build the dataset and cache it
        X = []  # inputs
        Y = []  # outputs
        onlyfiles = [f for f in listdir("FinalAudios") if isfile(join("FinalAudios", f))]  # files in dir
        names = []  # names of the speakers
        for file in onlyfiles:  # extract the speaker name from each wav filename
            if " " not in file.split("_")[0]:
                names.append(file.split("_")[0])
            else:
                names.append(file.split("_")[0].split(" ")[0])
        only_speakers = [] + names  # per-file speaker list (with duplicates)
        namesWithoutDuplicate = list(dict.fromkeys(names))
        namesWithoutDuplicateCopy = namesWithoutDuplicate[:]
        for name in namesWithoutDuplicateCopy:  # drop speakers with too few samples
            if names.count(name) < 107:
                namesWithoutDuplicate.remove(name)
        names = namesWithoutDuplicate
        print(names)
        vector_names = []  # integer class label for each name
        i = 0
        for name in names:
            vector_for_each_name = i
            vector_names.append(np.array(vector_for_each_name))
            i += 1
        for f in onlyfiles:  # for all the files
            if " " not in f.split("_")[0]:
                f_speaker = f.split("_")[0]
            else:
                f_speaker = f.split("_")[0].split(" ")[0]
            if f_speaker in namesWithoutDuplicate:
                fs, audio = wav.read("FinalAudios/" + f)  # read the file
                try:
                    # compute MFCC; every frame becomes one sample
                    mfcc_feat = python_speech_features.mfcc(audio, samplerate=fs, winlen=win_len,
                                                            winstep=step, nfft=nfft, appendEnergy=False)
                    for frame in mfcc_feat:
                        X.append(np.array(frame))
                        Y.append(np.array(vector_names[names.index(f_speaker)]))
                except IndexError:
                    pass
            else:  # move files of under-sampled speakers out of the way
                if not os.path.exists("TooLowSamples"):  # create the folder if it does not exist
                    os.makedirs("TooLowSamples")
                shutil.move("FinalAudios/" + f, "TooLowSamples/" + f)
        outfile_x = TemporaryFile()
        np.save(outfile_x, X)
        outfile_y = TemporaryFile()
        np.save(outfile_y, Y)
    else:  # later rounds: reload the cached dataset
        outfile_x.seek(0)
        X = np.load(outfile_x)
        outfile_y.seek(0)
        Y = np.load(outfile_y)
    # ------------------- RANDOMIZATION, UNNECESSARY TO UNDERSTAND THE CODE ------------------- #
    Z = list(zip(X, Y))
    shuffle(Z)  # shuffle X and Y together so the test split is random
    X, Y = zip(*Z)
    X = list(X)
    Y = list(Y)
    lenX = len(X)
    # ------------------- RANDOMIZATION, UNNECESSARY TO UNDERSTAND THE CODE ------------------- #
    y_test = np.asarray(Y[:4000])  # 4000 frames for test, the rest for training
    x_test = np.asarray(X[:4000])
    x_train = np.asarray(X[4000:])
    y_train = np.asarray(Y[4000:])
    x_val = x_train[-4000:]  # from the training data, take 4000 frames for validation
    y_val = y_train[-4000:]
    x_train = x_train[:-4000]
    y_train = y_train[:-4000]
    x_train = x_train.reshape(np.append(x_train.shape, (1, 1)))  # reshape to (N, 13, 1, 1) for the CNN input
    x_test = x_test.reshape(np.append(x_test.shape, (1, 1)))
    x_val = x_val.reshape(np.append(x_val.shape, (1, 1)))
    features_shape = x_val.shape
    # -------------- THE TENSORFLOW NEURAL NETWORK MODEL -------------- #
    model = tf.keras.models.Sequential([
        tf.keras.layers.Input(name='inputs', shape=(13, 1, 1), dtype='float32'),
        tf.keras.layers.Conv2D(32, (3, 3), activation='relu', padding='same', strides=1, name='block1_conv'),
        tf.keras.layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block1_pool'),
        tf.keras.layers.BatchNormalization(name='block1_norm'),
        tf.keras.layers.Conv2D(32, (3, 3), activation='relu', padding='same', strides=1, name='block2_conv'),
        tf.keras.layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block2_pool'),
        tf.keras.layers.BatchNormalization(name='block2_norm'),
        tf.keras.layers.Conv2D(32, (3, 3), activation='relu', padding='same', strides=1, name='block3_conv'),
        tf.keras.layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same', name='block3_pool'),
        tf.keras.layers.BatchNormalization(name='block3_norm'),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(64, activation='relu', name='dense'),
        tf.keras.layers.BatchNormalization(name='dense_norm'),
        tf.keras.layers.Dropout(0.2, name='dropout'),
        tf.keras.layers.Dense(10, activation='softmax', name='pred')  # one unit per speaker class
    ])
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    # -------------- THE TENSORFLOW NEURAL NETWORK MODEL -------------- #
    print("fitting")
    history = model.fit(x_train, y_train, epochs=15, validation_data=(x_val, y_val))
    print("testing")
    results.append(model.evaluate(x_test, y_test)[1])
    print(results)
    print(sum(results) / len(results))

# random-guess baseline: how often do two randomly drawn files share a speaker?
for i in range(10000):
    f_1 = only_speakers[randint(0, len(only_speakers) - 1)]
    f_2 = only_speakers[randint(0, len(only_speakers) - 1)]
    if " " not in f_1.split("_")[0]:
        f_speaker_1 = f_1.split("_")[0]
    else:
        f_speaker_1 = f_1.split("_")[0].split(" ")[0]
    if " " not in f_2.split("_")[0]:
        f_speaker_2 = f_2.split("_")[0]
    else:
        f_speaker_2 = f_2.split("_")[0].split(" ")[0]
    if f_speaker_2 == f_speaker_1:
        winner.append(1)
    else:
        winner.append(0)
print(sum(winner) / len(winner))

# pyplot.plot(history.history['loss'], label='train')
# pyplot.plot(history.history['val_loss'], label='test')
# pyplot.legend()
# pyplot.show()
Reading your post, here are the things I can suggest you fix/explore:
42% is not that impressive for the task at hand. Reconsider how you cross-validate, i.e. how you split your validation, test, and training datasets (see the split sketch right below).
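For example, both of your scripts shuffle at the individual-sample level, so samples cut from the same recording can land in both train and test, which inflates accuracy. Here is a minimal sketch of a leakage-free split using scikit-learn's GroupShuffleSplit; the `groups` list is a bookkeeping name introduced here for illustration (not in your code) that would record the source wav file of every sample as you build X and Y:

import numpy as np
from sklearn.model_selection import GroupShuffleSplit

# Dummy stand-ins: in the real script, X and y come from the MFCC loop and
# `groups` (hypothetical) records the source wav filename of every sample.
X = np.random.randn(12, 13)
y = np.array([0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1])
groups = ["a.wav", "a.wav", "a.wav", "b.wav", "b.wav", "b.wav",
          "c.wav", "c.wav", "d.wav", "d.wav", "e.wav", "f.wav"]

# Split so that all samples from one file stay on the same side.
splitter = GroupShuffleSplit(n_splits=1, test_size=0.25, random_state=0)
train_idx, test_idx = next(splitter.split(X, y, groups=groups))

X_train, X_test = X[train_idx], X[test_idx]
y_train, y_test = y[train_idx], y[test_idx]

# Sanity check: no file contributes samples to both sides of the split.
assert set(np.array(groups)[train_idx]).isdisjoint(np.array(groups)[test_idx])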
Your dataset seems very limited. Your task is to identify the speaker, and a single episode may not be enough for that.
You may want to consider deep neural network libraries such as Keras and TensorFlow. Convolutions are something you can apply directly to the MFCC graph.
If you decide to use TensorFlow or Keras, consider using a triplet loss, where you pre-select positive and negative examples; a sketch of the loss follows below.
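A minimal sketch of that loss (the FaceNet formulation from the arXiv link below), assuming your network is changed to output a fixed-size embedding per utterance instead of class probabilities; how you mine the anchor/positive/negative triplets from your data is up to you:

import tensorflow as tf

def triplet_loss(anchor, positive, negative, margin=0.2):
    """Pull same-speaker embeddings together, push different speakers apart."""
    pos_dist = tf.reduce_sum(tf.square(anchor - positive), axis=-1)
    neg_dist = tf.reduce_sum(tf.square(anchor - negative), axis=-1)
    return tf.reduce_mean(tf.maximum(pos_dist - neg_dist + margin, 0.0))

# Tiny demo on random embeddings; in practice anchor and positive come from
# the same speaker, and negative from a different one.
def fake_embeddings(batch=8, dim=64):
    return tf.math.l2_normalize(tf.random.normal((batch, dim)), axis=-1)

print(triplet_loss(fake_embeddings(), fake_embeddings(), fake_embeddings()).numpy())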
Consider reading up on the current state of the art for your task: https://github.com/grausof/keras-sincnet
Consider reading https://arxiv.org/abs/1503.03832 and adapting it for speech recognition.
The simplest way to improve your results is to add CNN layers to extract features from the MFCCs, as in the input-shaping sketch after this paragraph.
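One caveat before adding layers: in your edited script every sample is a single 13x1 MFCC frame, so 3x3 convolutions have almost no structure to slide over. A hedged sketch of the common alternative, stacking consecutive frames into a (frames x coefficients) patch so the CNN sees real time-frequency structure; the 25-frame patch length and the layer sizes are illustrative choices, not tuned values:

import numpy as np
import tensorflow as tf

FRAMES = 25  # arbitrary patch length; tune it
N_MFCC = 13

def to_patches(mfcc_feat, label, frames=FRAMES):
    """Cut an utterance's (T, 13) MFCC matrix into (frames, 13, 1) patches."""
    patches, labels = [], []
    for start in range(0, len(mfcc_feat) - frames + 1, frames):
        patches.append(mfcc_feat[start:start + frames, :, np.newaxis])
        labels.append(label)
    return patches, labels

model = tf.keras.models.Sequential([
    tf.keras.layers.Input(shape=(FRAMES, N_MFCC, 1)),
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu', padding='same'),
    tf.keras.layers.MaxPooling2D((2, 2)),
    tf.keras.layers.Conv2D(64, (3, 3), activation='relu', padding='same'),
    tf.keras.layers.MaxPooling2D((2, 2)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(64, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax'),  # one unit per speaker
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# Demo with random data shaped like an utterance's MFCC matrix.
dummy_utterance = np.random.randn(120, N_MFCC)
patches, labels = to_patches(dummy_utterance, label=3)
print(len(patches), patches[0].shape)  # 4 patches of shape (25, 13, 1)
x = np.stack(patches).astype('float32')
print(model.predict(x).shape)  # (4, 10)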