Python梯度下降线性回归计算不好
Python gradient descent linear regression not calculating well
我在学习了 Coursera 上 Andrew Ng 的课程之后开始学习机器学习。我正在尝试实现 gradient descent with linear regression(线性回归的梯度下降),
但我不确定我错过了什么。根据这个
我已经尝试实现它,但出了点问题。这是代码。值得指出的是,这是我第一次接触python,没有学习基础知识。
import numpy as np
import matplotlib.pyplot as plt

plt.ion()

x = [1, 2, 3, 4, 5]
y = [1, 2, 3, 4, 5]


def Gradient_Descent(x, y, learning_rate, iterations):
    """Fit y ~ theta_0 + theta_1 * x by batch gradient descent, re-plotting
    the data and the current fitted line after every update step.

    Parameters
    ----------
    x, y : 1-D numpy arrays of equal length (the training data).
    learning_rate : float, gradient-descent step size.
    iterations : int, number of update steps to run.
    """
    # Random starting point; any finite value works since the cost is convex.
    theta_1 = np.random.randint(low=2, high=5)
    theta_0 = np.random.randint(low=2, high=5)
    m = x.shape[0]

    def mean_error(a, b, factor):
        # Gradient term: sum_i (h(x_i) - y_i), each residual additionally
        # weighted by a[i] when `factor` is True (the theta_1 gradient).
        # BUG FIX: the original did `sum_mean *= a[i]`, which scales the
        # *running sum* instead of weighting each residual individually;
        # the theta_1 gradient is sum_i (h(x_i) - y_i) * x_i.
        sum_mean = 0
        for i in range(m):
            error = (theta_0 + theta_1 * a[i]) - b[i]  # h(x_i) - y_i
            if factor:
                error *= a[i]
            sum_mean += error
        return sum_mean

    def perform_cal(theta_0, theta_1, m):
        # Simultaneous update: both temps are computed from the OLD thetas
        # before either is replaced.
        temp_0 = theta_0 - learning_rate * ((1 / m) * mean_error(x, y, False))
        temp_1 = theta_1 - learning_rate * ((1 / m) * mean_error(x, y, True))
        return temp_0, temp_1

    fig = plt.figure()
    ax = fig.add_subplot(111)
    for _ in range(iterations):
        theta_0, theta_1 = perform_cal(theta_0, theta_1, m)
        # Redraw the scatter of the data plus the current fitted line.
        ax.clear()
        ax.plot(x, y, linestyle='None', marker='o')
        ax.plot(x, theta_0 + theta_1 * x)
        fig.canvas.draw()


if __name__ == "__main__":
    # Guarded so importing this module does not trigger plotting/input.
    x = np.array(x)
    y = np.array(y)
    Gradient_Descent(x, y, 0.1, 500)
    input("Press enter to close program")
我做错了什么?
import numpy as np
import matplotlib.pyplot as plt
# Interactive plotting mode so the figure refreshes while training runs.
plt.ion()

# Toy data set: five points lying exactly on the line y = x.
x = list(range(1, 6))
y = list(range(1, 6))
def Gradient_Descent(x, y, learning_rate, iterations):
    """Run batch gradient descent for the model y ~ theta_0 + theta_1 * x.

    Uses the module-level `fig`/`ax` to redraw the data and the current
    fitted line after every update step.
    """
    theta_0, theta_1 = 0, 0
    m = x.shape[0]
    for _ in range(iterations):
        theta_0, theta_1 = perform_cal(theta_0, theta_1, m, learning_rate)
        # Refresh the plot: data points plus the current regression line.
        ax.clear()
        ax.plot(x, y, linestyle='None', marker='o')
        ax.plot(x, theta_0 + theta_1 * x)
        fig.canvas.draw()
def mean_error(a, b, factor, m, theta_0, theta_1):
    """Gradient term for the linear-regression cost.

    Returns sum_i ((theta_0 + theta_1 * a[i]) - b[i]); when `factor` is
    True each residual is additionally weighted by a[i], which is the
    term the theta_1 update needs.

    BUG FIX: the original did `sum_mean *= a[i]`, scaling the
    *accumulated* sum rather than the individual residual — the theta_1
    gradient is sum_i (h(x_i) - y_i) * x_i. The leftover debug print()
    was also removed.
    """
    sum_mean = 0
    for i in range(m):
        error = (theta_0 + theta_1 * a[i]) - b[i]  # h(x_i) - y_i
        if factor:
            error *= a[i]
        sum_mean += error
    return sum_mean
def perform_cal(theta_0, theta_1, m, learning_rate):
    """One simultaneous gradient-descent update of (theta_0, theta_1).

    Both new values are computed from the old thetas before either is
    replaced; the training data come from the module-level `x` and `y`.
    """
    grad_0 = mean_error(x, y, False, m, theta_0, theta_1)
    grad_1 = mean_error(x, y, True, m, theta_0, theta_1)
    new_0 = theta_0 - learning_rate * ((1 / m) * grad_0)
    new_1 = theta_1 - learning_rate * ((1 / m) * grad_1)
    return new_0, new_1
# Figure and axes shared with Gradient_Descent for the live plot.
fig = plt.figure()
ax = fig.add_subplot(111)

# Convert the toy lists to numpy arrays, then run 100 steps at lr = 0.01.
x = np.array(x)
y = np.array(y)
Gradient_Descent(x, y, 0.01, 100)
对您的代码进行了一些更改(主要是重新排列了几行,并且没有更改您所做的任何内容,因此看起来不会造成混淆),现在可以正常工作了。我建议您首先学习该语言的基础知识,因为大多数错误都是非常基本的,例如参数传递等。但是,值得称赞的是,您正在按照 Andrew Ng 的课程实施自己尝试一些东西。
我在学习了 Coursera 上 Andrew Ng 的课程之后开始学习机器学习。我正在尝试实现 gradient descent with linear regression(线性回归的梯度下降),
但我不确定我错过了什么。根据这个
我已经尝试实现它,但出了点问题。这是代码。值得指出的是,这是我第一次接触python,没有学习基础知识。
import numpy as np
import matplotlib.pyplot as plt

plt.ion()

x = [1, 2, 3, 4, 5]
y = [1, 2, 3, 4, 5]


def Gradient_Descent(x, y, learning_rate, iterations):
    """Fit y ~ theta_0 + theta_1 * x by batch gradient descent, re-plotting
    the data and the current fitted line after every update step.

    Parameters
    ----------
    x, y : 1-D numpy arrays of equal length (the training data).
    learning_rate : float, gradient-descent step size.
    iterations : int, number of update steps to run.
    """
    # Random starting point; any finite value works since the cost is convex.
    theta_1 = np.random.randint(low=2, high=5)
    theta_0 = np.random.randint(low=2, high=5)
    m = x.shape[0]

    def mean_error(a, b, factor):
        # Gradient term: sum_i (h(x_i) - y_i), each residual additionally
        # weighted by a[i] when `factor` is True (the theta_1 gradient).
        # BUG FIX: the original did `sum_mean *= a[i]`, which scales the
        # *running sum* instead of weighting each residual individually;
        # the theta_1 gradient is sum_i (h(x_i) - y_i) * x_i.
        sum_mean = 0
        for i in range(m):
            error = (theta_0 + theta_1 * a[i]) - b[i]  # h(x_i) - y_i
            if factor:
                error *= a[i]
            sum_mean += error
        return sum_mean

    def perform_cal(theta_0, theta_1, m):
        # Simultaneous update: both temps are computed from the OLD thetas
        # before either is replaced.
        temp_0 = theta_0 - learning_rate * ((1 / m) * mean_error(x, y, False))
        temp_1 = theta_1 - learning_rate * ((1 / m) * mean_error(x, y, True))
        return temp_0, temp_1

    fig = plt.figure()
    ax = fig.add_subplot(111)
    for _ in range(iterations):
        theta_0, theta_1 = perform_cal(theta_0, theta_1, m)
        # Redraw the scatter of the data plus the current fitted line.
        ax.clear()
        ax.plot(x, y, linestyle='None', marker='o')
        ax.plot(x, theta_0 + theta_1 * x)
        fig.canvas.draw()


if __name__ == "__main__":
    # Guarded so importing this module does not trigger plotting/input.
    x = np.array(x)
    y = np.array(y)
    Gradient_Descent(x, y, 0.1, 500)
    input("Press enter to close program")
我做错了什么?
import numpy as np
import matplotlib.pyplot as plt
# Interactive plotting mode so the figure refreshes while training runs.
plt.ion()

# Toy data set: five points lying exactly on the line y = x.
x = list(range(1, 6))
y = list(range(1, 6))
def Gradient_Descent(x, y, learning_rate, iterations):
    """Run batch gradient descent for the model y ~ theta_0 + theta_1 * x.

    Uses the module-level `fig`/`ax` to redraw the data and the current
    fitted line after every update step.
    """
    theta_0, theta_1 = 0, 0
    m = x.shape[0]
    for _ in range(iterations):
        theta_0, theta_1 = perform_cal(theta_0, theta_1, m, learning_rate)
        # Refresh the plot: data points plus the current regression line.
        ax.clear()
        ax.plot(x, y, linestyle='None', marker='o')
        ax.plot(x, theta_0 + theta_1 * x)
        fig.canvas.draw()
def mean_error(a, b, factor, m, theta_0, theta_1):
    """Gradient term for the linear-regression cost.

    Returns sum_i ((theta_0 + theta_1 * a[i]) - b[i]); when `factor` is
    True each residual is additionally weighted by a[i], which is the
    term the theta_1 update needs.

    BUG FIX: the original did `sum_mean *= a[i]`, scaling the
    *accumulated* sum rather than the individual residual — the theta_1
    gradient is sum_i (h(x_i) - y_i) * x_i. The leftover debug print()
    was also removed.
    """
    sum_mean = 0
    for i in range(m):
        error = (theta_0 + theta_1 * a[i]) - b[i]  # h(x_i) - y_i
        if factor:
            error *= a[i]
        sum_mean += error
    return sum_mean
def perform_cal(theta_0, theta_1, m, learning_rate):
    """One simultaneous gradient-descent update of (theta_0, theta_1).

    Both new values are computed from the old thetas before either is
    replaced; the training data come from the module-level `x` and `y`.
    """
    grad_0 = mean_error(x, y, False, m, theta_0, theta_1)
    grad_1 = mean_error(x, y, True, m, theta_0, theta_1)
    new_0 = theta_0 - learning_rate * ((1 / m) * grad_0)
    new_1 = theta_1 - learning_rate * ((1 / m) * grad_1)
    return new_0, new_1
# Figure and axes shared with Gradient_Descent for the live plot.
fig = plt.figure()
ax = fig.add_subplot(111)

# Convert the toy lists to numpy arrays, then run 100 steps at lr = 0.01.
x = np.array(x)
y = np.array(y)
Gradient_Descent(x, y, 0.01, 100)
对您的代码进行了一些更改(主要是重新排列了几行,并且没有更改您所做的任何内容,因此看起来不会造成混淆),现在可以正常工作了。我建议您首先学习该语言的基础知识,因为大多数错误都是非常基本的,例如参数传递等。但是,值得称赞的是,您正在按照 Andrew Ng 的课程实施自己尝试一些东西。