How to use a predefined/trained (hdf5) weights file to predict the class of new EEG data?
I have a pretrained weights file, best_model.hdf5, created with the Keras library (Python) on the Theano backend. The model was trained with the following code:
# imports needed by this snippet
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, BatchNormalization, GaussianNoise
from keras.regularizers import l1_l2
from keras.callbacks import ModelCheckpoint, EarlyStopping
from keras.optimizers import Adam

# set parameters
batch_size = 1280
nb_epoch = 3000 #6000
l1_decay=0.00
l2_decay=0 # .5
# 0.01 0.06
sigma=0.005
in_drop_rate = .2
drop_rate = .5
print (tr_X.shape[1])
# set network layout
model = Sequential()
model.add(Dense(2184, input_shape=(tr_X.shape[1],),
                init='he_normal', W_regularizer=l1_l2(l1=l1_decay, l2=l2_decay)))
model.add(GaussianNoise(sigma))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(in_drop_rate))
model.add(Dense(1310, init='he_normal', W_regularizer=l1_l2(l1=l1_decay, l2=l2_decay)))
model.add(GaussianNoise(sigma))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(drop_rate))
model.add(Dense(786, init='he_normal', W_regularizer=l1_l2(l1=l1_decay, l2=l2_decay)))
model.add(GaussianNoise(sigma))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(drop_rate))
model.add(Dense(472, init='he_normal', W_regularizer=l1_l2(l1=l1_decay, l2=l2_decay)))
model.add(GaussianNoise(sigma))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(drop_rate))
model.add(Dense(4, W_regularizer=l1_l2(l1=l1_decay, l2=l2_decay)))
model.add(Activation('softmax'))
# Callbacks
model_checkpoint = ModelCheckpoint('best_model.hdf5', monitor='val_loss', save_best_only=True)
early = EarlyStopping(monitor='val_loss', patience=600, verbose=0)
# fit and evaluate the model
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=0.001))#SGD(lr=0.0019, momentum=0.9, decay=0.0, nesterov=True))
history = model.fit(tr_X, tr_y, batch_size=batch_size,
nb_epoch=nb_epoch, verbose=0, callbacks=[early, model_checkpoint],
validation_data=(va_X, va_y))
model.load_weights('best_model.hdf5')
tr_pr = model.predict(tr_X, batch_size=batch_size, verbose=0)
However, to test on real data (from an experiment) I have a different input size (for example, 552 features instead of 2184).
So, to read the hdf5 weights file and use it to predict the class of this data, I wrote the following:
# imports needed by this snippet
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, BatchNormalization, GaussianNoise
from keras import regularizers

# set parameters
batch_size = 4
l1_decay=0.00
l2_decay=0 # .5
# 0.01 0.06
sigma=0.005
in_drop_rate = .2
drop_rate = .5
# set network layout
model = Sequential()
model.add(Dense(552, input_shape=(552,),
                init='he_normal', W_regularizer=regularizers.l1_l2(l1=l1_decay, l2=l2_decay)))
model.add(GaussianNoise(sigma))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(in_drop_rate))
model.add(Dense(331, init='he_normal', W_regularizer=regularizers.l1_l2(l1=l1_decay, l2=l2_decay)))
model.add(GaussianNoise(sigma))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(drop_rate))
model.add(Dense(189, init='he_normal', W_regularizer=regularizers.l1_l2(l1=l1_decay, l2=l2_decay)))
model.add(GaussianNoise(sigma))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(drop_rate))
model.add(Dense(119, init='he_normal', W_regularizer=regularizers.l1_l2(l1=l1_decay, l2=l2_decay)))
model.add(GaussianNoise(sigma))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(drop_rate))
model.add(Dense(4, W_regularizer=regularizers.l1_l2(l1=l1_decay, l2=l2_decay)))
model.add(Activation('softmax'))
model.load_weights('best_model.hdf5')
te_pr = model.predict(X, batch_size=batch_size, verbose=0)
When I run the code, I get the following exception:
C:\Users\M\Desktop\Dr Abeer Folder\Emotion Project_code and dataset\End User\Experiment_Calculation.py:106: UserWarning: Update your `Dense` call to the Keras 2 API: `Dense(119, kernel_regularizer=<keras.reg..., kernel_initializer="he_normal")`
model.add(Dense(119, init='he_normal', W_regularizer=regularizers.l1_l2(l1=l1_decay, l2=l2_decay)))
C:\Users\M\Desktop\Dr Abeer Folder\Emotion Project_code and dataset\End User\Experiment_Calculation.py:112: UserWarning: Update your `Dense` call to the Keras 2 API: `Dense(4, kernel_regularizer=<keras.reg...)`
model.add(Dense(4, W_regularizer=regularizers.l1_l2(l1=l1_decay, l2=l2_decay)))
Traceback (most recent call last):
File "C:\Users\M\Desktop\Dr Abeer Folder\Emotion Project_code and dataset\End User\main2.py", line 88, in BrowseFileHandler
expcal.calclate_Experiment()
File "C:\Users\M\Desktop\Dr Abeer Folder\Emotion Project_code and dataset\End User\Experiment_Calculation.py", line 66, in calclate_Experiment
predictions = DNN(X)
File "C:\Users\M\Desktop\Dr Abeer Folder\Emotion Project_code and dataset\End User\Experiment_Calculation.py", line 117, in DNN
te_pr = model.predict(X, batch_size=batch_size, verbose=0)
File "C:\Users\M\AppData\Roaming\Python\Python27\site-packages\keras\models.py", line 902, in predict
return self.model.predict(x, batch_size=batch_size, verbose=verbose)
File "C:\Users\M\AppData\Roaming\Python\Python27\site-packages\keras\engine\training.py", line 1585, in predict
batch_size=batch_size, verbose=verbose)
File "C:\Users\M\AppData\Roaming\Python\Python27\site-packages\keras\engine\training.py", line 1212, in _predict_loop
batch_outs = f(ins_batch)
File "C:\Users\M\AppData\Roaming\Python\Python27\site-packages\keras\backend\theano_backend.py", line 1158, in __call__
return self.function(*inputs)
File "C:\Users\M\AppData\Roaming\Python\Python27\site-packages\theano\compile\function_module.py", line 898, in __call__
storage_map=getattr(self.fn, 'storage_map', None))
File "C:\Users\M\AppData\Roaming\Python\Python27\site-packages\theano\gof\link.py", line 325, in raise_with_op
reraise(exc_type, exc_value, exc_trace)
File "C:\Users\M\AppData\Roaming\Python\Python27\site-packages\theano\compile\function_module.py", line 884, in __call__
self.fn() if output_subset is None else\
ValueError: dimension mismatch in args to gemm (4,552)x(2184,2184)->(4,2184)
Apply node that caused the error: GpuDot22(GpuFromHost.0, dense_1/kernel)
Toposort index: 28
Inputs types: [CudaNdarrayType(float32, matrix), CudaNdarrayType(float32, matrix)]
Inputs shapes: [(4, 552), (2184, 2184)]
Inputs strides: [(552, 1), (2184, 1)]
Inputs values: ['not shown', 'not shown']
Outputs clients: [[GpuElemwise{Add}[(0, 0)](GpuDot22.0,
GpuDimShuffle{x,0}.0), GpuElemwise{Composite{(i0 + i1 + (i2 * i3))}}[(0, 3)]
(GpuDot22.0, GpuDimShuffle{x,0}.0, CudaNdarrayConstant{[[ 0.005]]}, GpuReshape{2}.0)]]
HINT: Re-running with most Theano optimization disabled could give you a back-trace of when this node was created. This can be done with by setting the Theano flag 'optimizer=fast_compile'. If that does not work, Theano optimizations can be disabled with 'optimizer=None'.
HINT: Use the Theano flag 'exception_verbosity=high' for a debugprint and storage map footprint of this apply node.
Can anyone help me understand this problem? I am new to this field, especially to Keras and Theano. How can I fix it? Is there a way to adapt the prediction model?
Best regards,
It's quite simple.
The first layer of the model you trained is a 2184x2184 weight matrix. The saved weights were therefore trained for 2184 input features, and they are tied to the kind of input you trained on.
If I understand correctly, you now want to apply that matrix to inputs of length 552. You are building a model whose first layer is a 552x552 matrix and trying to load a 2184x2184 matrix into it. There is simply no way to do that; it will not work. Your prediction inputs must have exactly the same shape as the training inputs, and you cannot reshape an already-trained model.
I hope it is clear why this does not work :-) If not, please ask for clarification.
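To see the mismatch concretely, you can list the weight shapes stored in the checkpoint with h5py. This is a minimal sketch; it assumes h5py is installed and that best_model.hdf5 was written by the ModelCheckpoint callback above (the exact group names inside the file depend on the Keras version, so the script simply walks everything):

# Sketch: print every weight tensor stored in the checkpoint.
import h5py

with h5py.File('best_model.hdf5', 'r') as f:
    def show(name, obj):
        if hasattr(obj, 'shape'):   # datasets only; groups have no shape
            print('%s %s' % (name, obj.shape))
    f.visititems(show)
# The first Dense kernel is reported with shape (2184, 2184), which is
# exactly the 'dense_1/kernel' operand shown in the traceback above.

And because those weights cannot be reshaped, the only way to reuse best_model.hdf5 is to rebuild exactly the architecture that produced it and feed it 2184-dimensional feature vectors computed the same way as the training data. A rough sketch under that assumption; extract_features is a hypothetical placeholder for whatever preprocessing turned the raw EEG into 2184 features during training:

from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, BatchNormalization, GaussianNoise

def build_trained_architecture(input_dim=2184):
    # Same layer sequence and sizes as the training script; initializers and
    # regularizers are omitted because load_weights overwrites the kernels
    # and only the layer shapes have to match.
    model = Sequential()
    model.add(Dense(2184, input_shape=(input_dim,)))
    model.add(GaussianNoise(0.005))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.2))
    for width in (1310, 786, 472):
        model.add(Dense(width))
        model.add(GaussianNoise(0.005))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(Dropout(0.5))
    model.add(Dense(4))
    model.add(Activation('softmax'))
    return model

model = build_trained_architecture()
model.load_weights('best_model.hdf5')

X = extract_features(raw_eeg)  # hypothetical: must yield shape (n_samples, 2184)
te_pr = model.predict(X, batch_size=4, verbose=0)

If the original 2184 training features cannot be recomputed for the new recordings, the remaining option is to train a new model directly on the 552-dimensional data.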