
Constraining a neural network's output to be within an arbitrary range

I have a custom neural network that I am training on data, and I am trying to constrain the network's output values to lie between two arbitrary constants: [lower_bound, upper_bound]. Are there any best practices for encoding this constraint in the loss function?

Below I have written a minimal working example in which I build and train a neural network on generated data, and I place the arbitrary constraint that the output lie within [lower_bound, upper_bound] = [-0.5, 0.75] into the loss function being optimized. My current approach is relatively crude: I find all instances where the predicted values exceed the bounds and simply set the loss contribution of those terms to a large value (and to zero if the prediction is within the given range):

lower_bound = -0.5 #a guessed a priori lower bound on the output
upper_bound = 0.75 #a guessed a priori upper bound on the output
cond_v1_1 = tf.greater(self.v1_pred[:,0], upper_bound*tf.ones(tf.shape(self.v1_pred[:,0]))) #predictions above the upper bound
cond_v1_2 = tf.less(self.v1_pred[:,0], lower_bound*tf.ones(tf.shape(self.v1_pred[:,0]))) #predictions below the lower bound
self.red_v1 = tf.where(cond_v1_1, 100000.0*tf.ones(tf.shape(self.v1_pred[:,0])), tf.zeros(tf.shape(self.v1_pred[:,0])))
self.red_v1 = tf.where(cond_v1_2, 100000.0*tf.ones(tf.shape(self.v1_pred[:,0])), self.red_v1)
self.loss_cond = tf.reduce_sum(tf.square(self.red_v1))

But is there a method or loss function that encodes this constraint better when training the neural network? Perhaps a smoother loss function that the optimizer can handle more easily, and/or modifications to my code itself? Any comments and further ideas on best practices for penalizing/training the neural network in the full code below would be much appreciated.
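For example, I wondered whether a smooth hinge-style penalty on the amount of violation would behave better than a step up to a huge constant. A sketch (untested, reusing self.v1_pred and the bounds from the snippet above; penalty_weight is a made-up constant):

over = tf.nn.relu(self.v1_pred[:,0] - upper_bound)   # how far predictions exceed the upper bound (0 if inside)
under = tf.nn.relu(lower_bound - self.v1_pred[:,0])  # how far predictions fall below the lower bound (0 if inside)
penalty_weight = 100.0  # hypothetical weight balancing this penalty against the data loss
self.loss_cond = penalty_weight*tf.reduce_mean(tf.square(over) + tf.square(under))

The full minimal working example follows: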


import numpy as np 
import tensorflow as tf

end_it = 1000 #number of iterations
frac_train = 1.0 #randomly sampled fraction of data to create training set
frac_sample_train = 0.01 #randomly sampled fraction of data from training set to train in batches
layers = [2, 20, 20, 20, 1]

#Generate training data
len_data = 10000
x_x = np.array([np.linspace(0.,1.,len_data)])
x_y = np.array([np.linspace(0.,1.,len_data)]) 
y_true = np.array([np.linspace(-0.2,0.2,len_data)])

N_train = int(frac_train*len_data)
idx = np.random.choice(len_data, N_train, replace=False)

x_train = x_x.T[idx,:]
y_train = x_y.T[idx,:] 
v1_train = y_true.T[idx,:] 

sample_batch_size = int(frac_sample_train*N_train)

np.random.seed(1234)
tf.set_random_seed(1234)
import logging
logging.getLogger('tensorflow').setLevel(logging.ERROR)
tf.logging.set_verbosity(tf.logging.ERROR)

class NeuralNet:
    def __init__(self, x, y, v1, layers):
        X = np.concatenate([x, y], 1)  
        self.lb = X.min(0)
        self.ub = X.max(0)
        self.X = X
        self.x = X[:,0:1]
        self.y = X[:,1:2] 
        self.v1 = v1 
        self.layers = layers 
        self.weights_v1, self.biases_v1 = self.initialize_NN(layers) 
        self.sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=False,
                                                     log_device_placement=False)) 
        self.x_tf = tf.placeholder(tf.float32, shape=[None, self.x.shape[1]])
        self.y_tf = tf.placeholder(tf.float32, shape=[None, self.y.shape[1]]) 
        self.v1_tf = tf.placeholder(tf.float32, shape=[None, self.v1.shape[1]])  
        self.v1_pred = self.net(self.x_tf, self.y_tf) 
        lower_bound = -0.5 #a guessed a priori lower bound on the output
        upper_bound = 0.75 #a guessed a priori upper bound on the output
        cond_v1_1 = tf.greater(self.v1_pred[:,0], upper_bound*tf.ones(tf.shape(self.v1_pred[:,0]))) #predictions above the upper bound
        cond_v1_2 = tf.less(self.v1_pred[:,0], lower_bound*tf.ones(tf.shape(self.v1_pred[:,0]))) #predictions below the lower bound
        self.red_v1 = tf.where(cond_v1_1, 100000.0*tf.ones(tf.shape(self.v1_pred[:,0])), tf.zeros(tf.shape(self.v1_pred[:,0])))
        self.red_v1 = tf.where(cond_v1_2, 100000.0*tf.ones(tf.shape(self.v1_pred[:,0])), self.red_v1)
        self.loss_cond = tf.reduce_sum(tf.square(self.red_v1))
        self.loss_data = tf.reduce_mean(tf.square(self.v1_tf - self.v1_pred)) 
        self.loss = self.loss_cond + self.loss_data
        self.optimizer = tf.contrib.opt.ScipyOptimizerInterface(self.loss,
                                                                var_list=self.weights_v1+self.biases_v1,
                                                                method = 'L-BFGS-B',
                                                                options = {'maxiter': 50,
                                                                           'maxfun': 50000,
                                                                           'maxcor': 50,
                                                                           'maxls': 50,
                                                                           'ftol' : 1.0 * np.finfo(float).eps})
        self.optimizer_Adam = tf.train.AdamOptimizer()
        self.train_op_Adam_v1 = self.optimizer_Adam.minimize(self.loss, var_list=self.weights_v1+self.biases_v1) 
        init = tf.global_variables_initializer()  
        self.sess.run(init)
    def initialize_NN(self, layers):
        weights = []
        biases = []
        num_layers = len(layers)
        for l in range(0,num_layers-1):
            W = self.xavier_init(size=[layers[l], layers[l+1]])
            b = tf.Variable(tf.zeros([1,layers[l+1]], dtype=tf.float32), dtype=tf.float32)
            weights.append(W)
            biases.append(b) 
        return weights, biases
    def xavier_init(self, size):
        in_dim = size[0]
        out_dim = size[1]
        xavier_stddev = np.sqrt(2/(in_dim + out_dim)) 
        return tf.Variable(tf.truncated_normal([in_dim, out_dim], stddev=xavier_stddev), dtype=tf.float32)
    def neural_net(self, X, weights, biases):
        num_layers = len(weights) + 1
        H = 2.0*(X - self.lb)/(self.ub - self.lb) - 1.0
        for l in range(0,num_layers-2):
            W = weights[l]
            b = biases[l]
            H = tf.tanh(tf.add(tf.matmul(H, W), b))
        W = weights[-1]
        b = biases[-1]
        Y = tf.add(tf.matmul(H, W), b) 
        return Y
    def net(self, x, y): 
        v1_out = self.neural_net(tf.concat([x,y], 1), self.weights_v1, self.biases_v1)
        v1 = v1_out[:,0:1]
        return v1
    def callback(self, loss):
        global Nfeval
        print(str(Nfeval)+' - Loss in loop: %.3e' % (loss))
        Nfeval += 1
    def fetch_minibatch(self, x_in, y_in, v1_in, N_train_sample):  
        idx_batch = np.random.choice(len(x_in), N_train_sample, replace=False)
        x_batch = x_in[idx_batch,:]
        y_batch = y_in[idx_batch,:] 
        v1_batch = v1_in[idx_batch,:] 
        return x_batch, y_batch, v1_batch
    def train(self, end_it):
        it = 0
        while it < end_it: 
            x_res_batch, y_res_batch, v1_res_batch = self.fetch_minibatch(self.x, self.y, self.v1, sample_batch_size) # Fetch residual mini-batch
            tf_dict = {self.x_tf: x_res_batch, self.y_tf: y_res_batch,
                       self.v1_tf: v1_res_batch}
            self.sess.run(self.train_op_Adam_v1, tf_dict)
            self.optimizer.minimize(self.sess,
                                    feed_dict = tf_dict,
                                    fetches = [self.loss],
                                    loss_callback = self.callback) 
            it = it + 1
    def predict(self, x_star, y_star): 
        tf_dict = {self.x_tf: x_star, self.y_tf: y_star}
        v1_star = self.sess.run(self.v1_pred, tf_dict)  
        return v1_star

model = NeuralNet(x_train, y_train, v1_train, layers)
 
Nfeval = 1
model.train(end_it)

The best way (IMHO) to do this kind of thing is to enforce it through the output activation function. We can use tf.nn.sigmoid, which is bounded between [0, 1], as a basis, and shift and scale it slightly.

def bounded_output(x, lower, upper):
    scale = upper - lower
    return scale * tf.nn.sigmoid(x) + lower  # maps any real input into (lower, upper)

In your case, call it with lower=-0.5, upper=0.75. That shifts the sigmoid so the lowest possible output is -0.5, and scales its range to 0.75 + 0.5 = 1.25, giving an upper limit of 0.75. Adding this as the output activation on the last layer of the network means the output cannot fall outside the range.
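Concretely, in the neural_net method above that means wrapping the final affine layer (same variable names as your code):

W = weights[-1]
b = biases[-1]
Y = bounded_output(tf.add(tf.matmul(H, W), b), lower=-0.5, upper=0.75)
return Y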

One caveat: this can lead to poor gradients, because the function saturates as it approaches the limits. So if your network produces outputs close to those limits, the gradients will be small and learning will be slow.
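You can see why: the derivative of scale * sigmoid(x) + lower is scale * s * (1 - s) with s = sigmoid(x), which vanishes as the output approaches either bound. A quick NumPy check (standalone, not part of the model above):

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

scale = 0.75 - (-0.5)  # width of the target range, 1.25
for x in [0.0, 2.0, 5.0, 10.0]:
    s = sigmoid(x)
    out = scale * s + (-0.5)        # bounded output
    grad = scale * s * (1.0 - s)    # gradient of the output w.r.t. x
    print("x=%5.1f  output=%.4f  gradient=%.6f" % (x, out, grad))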