How to use a Lambda layer in Keras?
I want to define a Lambda layer to combine features with a cross product, and then merge those models, as in the figure. How can I do that?

To test model_1: I take the 128-dimensional output of the Dense layer, use pywt to get two 64-dimensional features (cA, cD), and then return cA*cD. (Of course I want to merge the two models eventually, but I am trying model_1 first.)
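For reference, a minimal numpy-only sketch of what pywt.dwt does to a 128-dimensional vector (plain numpy arrays here, not Keras tensors; the random input is just an illustration):

import numpy as np
import pywt

x = np.random.rand(128)        # stand-in for the 128-dim Dense output
cA, cD = pywt.dwt(x, 'db1')    # approximation and detail coefficients
print(cA.shape, cD.shape)      # (64,) (64,)
print((cA * cD).shape)         # (64,)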
from keras.models import Sequential,Model
from keras.layers import Input,Convolution2D,MaxPooling2D
from keras.layers.core import Dense,Dropout,Activation,Flatten,Lambda
import pywt
def myFunc(x):
    (cA, cD) = pywt.dwt(x, 'db1')
    # x=x*x
    return cA*cD
batch_size=32
nb_classes=3
nb_epoch=20
img_rows,img_cols=200,200
img_channels=1
nb_filters=32
nb_pool=2
nb_conv=3
inputs=Input(shape=(1,img_rows,img_cols))
x=Convolution2D(nb_filters,nb_conv,nb_conv,border_mode='valid',
input_shape=(1,img_rows,img_cols),activation='relu')(inputs)
x=Convolution2D(nb_filters,nb_conv,nb_conv,activation='relu')(x)
x=MaxPooling2D(pool_size=(nb_pool,nb_pool))(x)
x=Dropout(0.25)(x)
x=Flatten()(x)
y=Dense(128,activation='relu')(x)
cross=Lambda(myFunc,output_shape=(64,))(y)
predictions=Dense(nb_classes,activation='softmax')(cross)
model = Model(input=inputs, output=predictions)
model.compile(loss='categorical_crossentropy',optimizer='adadelta',metrics=['accuracy'])
model.fit(X_train,Y_train,batch_size=batch_size,nb_epoch=nb_epoch,
verbose=1,validation_data=(X_test,Y_test))
Sorry, may I ask a question about tensors?
import tensorflow as tf
import numpy as np

W1 = tf.Variable(np.array([[1,2],[3,4]]))
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
array = W1.eval(sess)
print (array)
That works! However, with
from keras import backend as K
import numpy as np
kvar=K.variable(np.array([[1,2],[3,4]]))
K.eval(kvar)
print(kvar)
I get <CudaNdarrayType(float32, matrix)>, and with kvar.eval()
I get b'CudaNdarray([[ 1. 2.]\n [ 3. 4.]])'.
I am using Keras, so how can I get an array like the TensorFlow one with Keras?
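As a side note, a minimal sketch assuming the TensorFlow backend: K.eval returns the evaluated value (a plain numpy array there), so the trick is to print that return value rather than kvar itself, which is only the backend variable object:

from keras import backend as K
import numpy as np

kvar = K.variable(np.array([[1, 2], [3, 4]]))
value = K.eval(kvar)   # with the TensorFlow backend this is a plain numpy array
print(value)
# [[ 1.  2.]
#  [ 3.  4.]]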
I would probably duplicate the Dense layers. Instead of 2 layers with 128 units, use 4 layers with 64 units. The result is the same, but you will be able to perform the cross products better.
from keras.models import Model
from keras.layers import Dense, Lambda, Concatenate

#create dense layers and store their output tensors; they use the outputs of models 1 and 2 as input
d1 = Dense(64, ....)(Model_1.output)
d2 = Dense(64, ....)(Model_1.output)
d3 = Dense(64, ....)(Model_2.output)
d4 = Dense(64, ....)(Model_2.output)
cross1 = Lambda(myFunc, output_shape=....)([d1,d4])
cross2 = Lambda(myFunc, output_shape=....)([d2,d3])
#I don't really know what kind of "merge" you want, so I used concatenate, there are Add, Multiply and others....
output = Concatenate()([cross1,cross2])
#use the "axis" attribute of the concatenate layer to define better which axis will be doubled due to the concatenation
model = Model([Model_1.input,Model_2.input], output)
Now, for the lambda function:
import keras.backend as K

def myFunc(x):
    return x[0] * x[1]
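Putting this together, here is a minimal self-contained sketch of the wiring described above (Keras 2 functional API assumed); the input shapes, activations, and the two Input layers standing in for Model_1 and Model_2 are placeholders chosen for illustration, not taken from the original models:

from keras.models import Model
from keras.layers import Input, Dense, Lambda, Concatenate

def myFunc(x):
    # element-wise product of two 64-dim feature tensors
    return x[0] * x[1]

# hypothetical stand-ins for Model_1.output and Model_2.output
in1 = Input(shape=(200,))
in2 = Input(shape=(200,))

d1 = Dense(64, activation='relu')(in1)
d2 = Dense(64, activation='relu')(in1)
d3 = Dense(64, activation='relu')(in2)
d4 = Dense(64, activation='relu')(in2)

cross1 = Lambda(myFunc, output_shape=(64,))([d1, d4])
cross2 = Lambda(myFunc, output_shape=(64,))([d2, d3])

output = Concatenate()([cross1, cross2])   # 128-dim merged feature vector
model = Model([in1, in2], output)
model.summary()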