Custom Hebbian Layer Implementation in Keras - input/output dims and lateral node connections
I am trying to implement an unsupervised ANN with Hebbian updating in Keras. I found a custom Hebbian layer made by Dan Saunders here - https://github.com/djsaunde/rinns_python/blob/master/hebbian/hebbian.py
(I hope it is not poor form to ask questions about someone else's code here.)
In the examples I found that use this layer in the repo, it sits as an intermediate layer between Dense/Conv layers, but I would like to build a network that uses only Hebbian layers.
Two critical things in this implementation confuse me:
1. It seems that the input dims and output dims must be the same for the layer to work. Why is that, and what can I do to make them different?
2. Why is the diagonal of the weight matrix set to zero? It says this is to "ensure that no neuron is laterally connected to itself", but I thought the connection weights were between the previous layer and the current layer, not between the current layer and itself.
Here is the code for the Hebbian layer implementation:
from keras import backend as K
from keras.engine.topology import Layer
import numpy as np
import tensorflow as tf

np.set_printoptions(threshold=np.nan)

sess = tf.Session()


class Hebbian(Layer):

    def __init__(self, output_dim, lmbda=1.0, eta=0.0005, connectivity='random', connectivity_prob=0.25, **kwargs):
        '''
        Constructor for the Hebbian learning layer.

        args:
            output_dim - The shape of the output / activations computed by the layer.
            lambda - A floating-point valued parameter governing the strength of the Hebbian learning activation.
            eta - A floating-point valued parameter governing the Hebbian learning rate.
            connectivity - A string which determines the way in which the neurons in this layer are connected to
                the neurons in the previous layer.
        '''
        self.output_dim = output_dim
        self.lmbda = lmbda
        self.eta = eta
        self.connectivity = connectivity
        self.connectivity_prob = connectivity_prob

        if self.connectivity == 'random':
            self.B = np.random.random(self.output_dim) < self.connectivity_prob
        elif self.connectivity == 'zero':
            self.B = np.zeros(self.output_dim)

        super(Hebbian, self).__init__(**kwargs)

    def random_conn_init(self, shape, dtype=None):
        A = np.random.normal(0, 1, shape)
        A[self.B] = 0
        return tf.constant(A, dtype=tf.float32)

    def zero_init(self, shape, dtype=None):
        return np.zeros(shape)

    def build(self, input_shape):
        # create weight variable for this layer according to user-specified initialization
        if self.connectivity == 'all':
            self.kernel = self.add_weight(name='kernel', shape=(np.prod(input_shape[1:]), \
                            np.prod(self.output_dim)), initializer='uniform', trainable=False)
        elif self.connectivity == 'random':
            self.kernel = self.add_weight(name='kernel', shape=(np.prod(input_shape[1:]), \
                            np.prod(self.output_dim)), initializer=self.random_conn_init, trainable=False)
        elif self.connectivity == 'zero':
            self.kernel = self.add_weight(name='kernel', shape=(np.prod(input_shape[1:]), \
                            np.prod(self.output_dim)), initializer=self.zero_init, trainable=False)
        else:
            raise NotImplementedError

        # ensure that no neuron is laterally connected to itself
        self.kernel = self.kernel * tf.diag(tf.zeros(self.output_dim))

        # call superclass "build" function
        super(Hebbian, self).build(input_shape)

    def call(self, x):
        x_shape = tf.shape(x)
        batch_size = tf.shape(x)[0]

        # reshape to (batch_size, product of other dimensions) shape
        x = tf.reshape(x, (tf.reduce_prod(x_shape[1:]), batch_size))

        # compute activations using Hebbian-like update rule
        activations = x + self.lmbda * tf.matmul(self.kernel, x)

        # compute outer product of activations matrix with itself
        outer_product = tf.matmul(tf.expand_dims(x, 1), tf.expand_dims(x, 0))

        # update the weight matrix of this layer
        self.kernel = self.kernel + tf.multiply(self.eta, tf.reduce_mean(outer_product, axis=2))
        self.kernel = tf.multiply(self.kernel, self.B)
        self.kernel = self.kernel * tf.diag(tf.zeros(self.output_dim))

        return K.reshape(activations, x_shape)
At first I expected this layer to take input from the previous layer, perform a simple activation computation (input * weights), update the weights according to a Hebb update (roughly: if activation is high between two nodes, increase the weight between them), and then pass the activations on to the next layer.
I also expected it to be able to handle a decreasing/increasing number of nodes from one layer to the next. A sketch of what I had in mind is below.
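Roughly, the kind of feedforward Hebbian step I was expecting looks like this (just a minimal NumPy sketch with made-up sizes, not the repo's rule):

import numpy as np

eta = 0.0005                 # Hebbian learning rate
x = np.random.rand(256)      # activations coming in from the previous layer
W = np.random.rand(256, 24)  # weights from 256 input nodes to 24 output nodes

y = x @ W                    # simple activation computation (input * weights)
W += eta * np.outer(x, y)    # Hebb update: strengthen weights between co-active nodes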
Instead, I cannot figure out why the input and output dims must be the same, or why the diagonal of the weight matrix is set to zero.
Where in the code does it say (implicitly or explicitly) that the layers must have the same dims?
Where in the code does it say (implicitly or explicitly) that this layer's weight matrix connects the current layer to itself?
Apologies if this question should have been split into two, but they seem like they might be related to each other, so I kept them as one.
Happy to provide more details if needed.
EDIT: I realized I forgot to add the error message I get when I try to create a layer whose output dim differs from its input dim:
model = Sequential()
model.add(Hebbian(input_shape = (256,1), output_dim = 256))
This ^ compiles without error.
model = Sequential()
model.add(Hebbian(input_shape = (256,1), output_dim = 24))
This one ^ throws the error:
IndexError: boolean index did not match indexed array along dimension 0; dimension is 256 but corresponding boolean dimension is 24
OK, I think I mostly figured it out. There were many small issues, but the most important one was that I needed to add a compute_output_shape function, which allows the layer to change the shape of its input, as described here:
https://keras.io/layers/writing-your-own-keras-layers/
Here is the code with all the changes I made. It compiles and changes the input shape just fine. Note that this layer computes the weight changes inside the layer itself, so there may be some issues if you try to actually use it (I am still ironing those out), but that is a separate problem.
class Hebbian(Layer):

    def __init__(self, output_dim, lmbda=1.0, eta=0.0005, connectivity='random', connectivity_prob=0.25, **kwargs):
        '''
        Constructor for the Hebbian learning layer.

        args:
            output_dim - The shape of the output / activations computed by the layer.
            lambda - A floating-point valued parameter governing the strength of the Hebbian learning activation.
            eta - A floating-point valued parameter governing the Hebbian learning rate.
            connectivity - A string which determines the way in which the neurons in this layer are connected to
                the neurons in the previous layer.
        '''
        self.output_dim = output_dim
        self.lmbda = lmbda
        self.eta = eta
        self.connectivity = connectivity
        self.connectivity_prob = connectivity_prob

        super(Hebbian, self).__init__(**kwargs)

    def random_conn_init(self, shape, dtype=None):
        A = np.random.normal(0, 1, shape)
        A[self.B] = 0
        return tf.constant(A, dtype=tf.float32)

    def zero_init(self, shape, dtype=None):
        return np.zeros(shape)

    def build(self, input_shape):
        # create weight variable for this layer according to user-specified initialization
        if self.connectivity == 'random':
            self.B = np.random.random(input_shape[0]) < self.connectivity_prob
        elif self.connectivity == 'zero':
            self.B = np.zeros(self.output_dim)

        if self.connectivity == 'all':
            self.kernel = self.add_weight(name='kernel', shape=(np.prod(input_shape[1:]), \
                            np.prod(self.output_dim)), initializer='uniform', trainable=False)
        elif self.connectivity == 'random':
            self.kernel = self.add_weight(name='kernel', shape=(np.prod(input_shape[1:]), \
                            np.prod(self.output_dim)), initializer=self.random_conn_init, trainable=False)
        elif self.connectivity == 'zero':
            self.kernel = self.add_weight(name='kernel', shape=(np.prod(input_shape[1:]), \
                            np.prod(self.output_dim)), initializer=self.zero_init, trainable=False)
        else:
            raise NotImplementedError

        # call superclass "build" function
        super(Hebbian, self).build(input_shape)

    def call(self, x):  # x is the input to the network
        x_shape = tf.shape(x)
        batch_size = tf.shape(x)[0]

        # reshape to (batch_size, product of other dimensions) shape
        x = tf.reshape(x, (tf.reduce_prod(x_shape[1:]), batch_size))

        # compute activations using Hebbian-like update rule
        activations = x + self.lmbda * tf.matmul(self.kernel, x)

        # compute outer product of activations matrix with itself
        outer_product = tf.matmul(tf.expand_dims(x, 1), tf.expand_dims(x, 0))

        # update the weight matrix of this layer
        self.kernel = self.kernel + tf.multiply(self.eta, tf.reduce_mean(outer_product, axis=2))
        self.kernel = tf.multiply(self.kernel, self.B)

        return K.reshape(activations, x_shape)

    def compute_output_shape(self, input_shape):
        return (input_shape[0], self.output_dim)
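For example, the second snippet from the question, which used to raise the IndexError, now builds (assuming the same imports as above plus Sequential):

from keras.models import Sequential

model = Sequential()
model.add(Hebbian(input_shape=(256, 1), output_dim=24))  # output dim no longer has to equal input dim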
If anyone gets here from Google (like me; repeatedly) while trying to make a layer that learns online as it is called on new inputs, I just found another issue that I think is related:
self.call is only called when you define the graph; for learning to happen on every new input, you need to add self.add_update to the call function.
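For reference, a minimal sketch of that pattern (my own simplification, assuming a plain feedforward Hebbian rule on 2-D inputs rather than the exact reshaping/masking code above):

def call(self, x):
    # forward pass: plain input * weights
    y = K.dot(x, self.kernel)

    # Hebbian term: outer product of pre- and post-synaptic activity, averaged over the batch
    delta = self.eta * K.dot(K.transpose(x), y) / K.cast(K.shape(x)[0], K.floatx())

    # register the update so it runs every time the layer is called on new inputs,
    # not just once when the graph is defined
    self.add_update([K.update_add(self.kernel, delta)], inputs=x)

    return y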