SGD - loss starts increasing after some iterations
I am trying to implement stochastic gradient descent with two constraints, so I cannot use scikit-learn. Unfortunately, I am already struggling with plain SGD without the two constraints. The loss (squared loss) on the training set falls for some iterations, but after a while it starts increasing, as shown in the picture.

These are the functions I use:
def loss_prime_simple(w, node, feature, data):
    x = data[3]
    y = data[2]
    x_f = x[node][feature]
    y_node = y[node]
    ret = (y_node - w[feature] * x_f) * (-x_f)
    return ret
def update_weights(w, data, predecs, children, node, learning_rate):
    len_features = len(data[3][0])
    w_new = np.zeros(len_features)
    for feature_ in range(len_features):
        w_new[feature_] = loss_prime_simple(w, node, feature_, data)
    return w - learning_rate * w_new
def loss_simple(w, data):
    y_p = data[2]
    x = data[3]
    return ((y_p - np.dot(w, np.array(x).T)) ** 2).sum()
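For reference, for a single sample x with target y the squared loss and its per-weight derivative are

    L(w) = (y - w^T x)^2,    dL/dw_j = -2 (y - w^T x) x_j

(the constant factor 2 only rescales the learning rate and is often dropped).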
This shows the loss on the training set for two different learning rates (0.001, 0.0001): http://postimg.org/image/43nbmh8x5/
Can anybody spot the error, or suggest how to debug this? Thanks.
Edit:
As lejlot pointed out, it would help to have the data. Here is the data I used for x (a single sample): http://textuploader.com/5x0f1

y = 2

This gives this loss: http://postimg.org/image/o9d97kt9v/
Updated code:
def loss_prime_simple(w, node, feature, data):
    x = data[3]
    y = data[2]
    x_f = x[node][feature]
    y_node = y[node]
    return -(y_node - w[feature] * x_f) * x_f
def update_weights(w, data, predecs, children, node, learning_rate):
    len_features = len(data[3][0])
    w_new = np.zeros(len_features)
    for feature_ in range(len_features):
        w_new[feature_] = loss_prime_simple(w, node, feature_, data)
    return w - learning_rate * w_new
def loss_simple2(w, data):
    y_p = data[2]
    x = data[3]
    return ((y_p - np.dot(w, np.array(x).T)) ** 2).sum()
import numpy as np

X = [#put array from http://textuploader.com/5x0f1 here]
y = [2]
data = None, None, y, X

w = np.random.rand(4096)
a = [loss_simple2(w, data)]
for _ in range(200):
    for j in range(X.shape[0]):
        w = update_weights(w, data, None, None, j, 0.0001)
        a.append(loss_simple2(w, data))

from matplotlib import pyplot as plt
plt.figure()
plt.plot(a)
plt.show()
The main error one can notice is that you reshape instead of transpose. reshape only reinterprets the same row-major buffer under a new shape, while transpose actually swaps the axes, so in general the two produce different arrays. Compare:
>>> import numpy as np
>>> X = np.array(range(10)).reshape(2,-1)
>>> X
array([[0, 1, 2, 3, 4],
       [5, 6, 7, 8, 9]])
>>> X.reshape(-1, 2)
array([[0, 1],
       [2, 3],
       [4, 5],
       [6, 7],
       [8, 9]])
>>> X.T
array([[0, 5],
       [1, 6],
       [2, 7],
       [3, 8],
       [4, 9]])
>>> X.reshape(-1, 2) == X.T
array([[ True, False],
       [False, False],
       [False, False],
       [False, False],
       [False,  True]], dtype=bool)
The next thing that looks wrong is calling sum(array) where you should call array.sum(): Python's built-in sum iterates over the first axis, so on a 2-D array it returns per-column sums instead of the scalar total:
>>> import numpy as np
>>> x = np.array(range(10)).reshape(2, 5)
>>> x
array([[0, 1, 2, 3, 4],
       [5, 6, 7, 8, 9]])
>>> sum(x)
array([ 5,  7,  9, 11, 13])
>>> x.sum()
45
With these fixes, it works:
def loss_prime_simple(w, node, feature, data):
    x = data[3]
    y = data[2]
    x_f = x[node][feature]
    y_node = y[node]
    return -(y_node - w[feature] * x_f) * x_f
def update_weights(w, data, predecs, children, node, learning_rate):
    len_features = len(data[3][0])
    w_new = np.zeros(len_features)
    for feature_ in range(len_features):
        w_new[feature_] = loss_prime_simple(w, node, feature_, data)
    return w - learning_rate * w_new
def loss_simple(w, data):
    y_p = data[2]
    x = data[3]
    return ((y_p - np.dot(w, np.array(x).T)) ** 2).sum()
import numpy as np

X = np.random.randn(1000, 3)
y = np.random.randn(1000)
data = None, None, y, X

w = np.array([1, 3, 3])
loss = [loss_simple(w, data)]
for _ in range(20):
    for j in range(X.shape[0]):
        w = update_weights(w, data, None, None, j, 0.001)
        loss.append(loss_simple(w, data))

from matplotlib import pyplot as plt
plt.figure()
plt.plot(loss)
plt.show()
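More generally, a finite-difference gradient check catches this kind of derivative bug directly. A minimal sketch, assuming numpy and the loss_simple above (the helper names and eps are my own choices for illustration):

import numpy as np

def analytic_grad(w, feature, data):
    # Correct derivative of loss_simple w.r.t. w[feature]:
    # d/dw_j sum_i (y_i - w.x_i)^2 = sum_i -2 (y_i - w.x_i) x_ij
    x = np.array(data[3])
    y = np.array(data[2])
    residual = y - np.dot(w, x.T)
    return (-2 * residual * x[:, feature]).sum()

def numeric_grad(w, feature, data, eps=1e-6):
    # Central finite difference of loss_simple w.r.t. w[feature].
    w_plus, w_minus = w.copy(), w.copy()
    w_plus[feature] += eps
    w_minus[feature] -= eps
    return (loss_simple(w_plus, data) - loss_simple(w_minus, data)) / (2 * eps)

numeric_grad should agree with analytic_grad to several decimal places; comparing it against loss_prime_simple instead immediately exposes that the latter predicts with w[feature] * x_f where the full inner product np.dot(w, x[node]) is needed.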
The problem was that I updated the weights using only the single feature's contribution w[feature] * x_f as the prediction, instead of the full inner product np.dot(w, x[node]).

So this works:
def update_weights(w, x, y, learning_rate):
    # prediction for this sample: full inner product <w, x>
    inner_product = 0.0
    for f_ in range(len(x)):
        inner_product += (w[f_] * x[f_])
    dloss = inner_product - y
    for f_ in range(len(x)):
        w[f_] += (learning_rate * (-x[f_] * dloss))
    return w
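For completeness, a minimal driver loop exercising the fixed update; the data here is made up purely for illustration, and any numpy array X of shape (n_samples, n_features) with a target vector y of length n_samples would do:

import numpy as np

X = np.random.randn(1000, 3)        # hypothetical inputs
true_w = np.array([1.0, -2.0, 0.5])
y = X.dot(true_w)                   # noiseless targets from a known linear model
w = np.zeros(3)
for _ in range(20):                 # epochs
    for i in range(X.shape[0]):     # one SGD step per sample
        w = update_weights(w, X[i], y[i], 0.001)
print(w)                            # should end up close to true_w

With the prediction based on the full inner product, each step moves w toward the least-squares solution, and the training loss decreases instead of diverging.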