Optimizing four parameters in Python scipy.optimize.fmin_l_bfgs_b gives an error

I am writing an active learning algorithm that uses the L-BFGS routine from scipy.optimize. I need to optimize four parameters: alpha, beta, W, and gamma.

But it still does not work; I get this error:

optimLogitLBFGS = sp.optimize.fmin_l_bfgs_b(func, x0=np.array(alpha,beta,W,gamma), fprime=func_grad)
ValueError: only 2 non-keyword arguments accepted   

Note that in the last line of the code, x0 holds the initial guesses for the four parameters. If I change it to x0=np.array((alpha,beta,W,gamma),dtype=float), I get a different error:

ValueError: setting an array element with a sequence.

I am not sure why these errors occur. Here is my full code:

from sys import argv
import numpy as np
import scipy as sp
import scipy.optimize  # needed so that sp.optimize is available
import pandas as pd
import scipy.stats as sps

num_labeler = 3
num_instance = 5

X = np.array([[1,1,1,1],[2,2,2,2],[3,3,3,3],[4,4,4,4],[5,5,5,5]])
Z = np.array([1,0,1,0,1])
Y = np.array([[1,0,1],[0,1,0],[0,0,0],[1,1,1],[1,0,0]])

W = np.array([[1,1,1,1],[2,2,2,2],[3,3,3,3]])
gamma = np.ones(5)
alpha = np.ones(4)
beta = 1


def log_p_y_xz(yit,zi,sigmati): #log P(y_it|x_i,z_i)
    return np.log(sps.norm(zi,sigmati).pdf(yit))#tested

def log_p_z_x(alpha,beta,xi): #log P(z_i=1|x_i)
    return -np.log(1+np.exp(-np.dot(alpha,xi)-beta))#tested

def sigma_eta_ti(xi, w_t, gamma_t): # (1+exp(-w_t x_i -gamma_t))^-1
    return 1/(1+np.exp(-np.dot(xi,w_t)-gamma_t)) #tested

def df_alpha(X,Y,Z,W,alpha,beta,gamma): #df/dalpha
    return np.sum(
        (2/(1+np.exp(-np.dot(alpha,X[i])-beta))-1)
        * np.exp(-np.dot(alpha,X[i])-beta)*X[i]
        / (1+np.exp(-np.dot(alpha,X[i])-beta))**2
        for i in range(num_instance))

def df_beta(X,Y,Z,W,alpha,beta,gamma): #df/dbeta
    return np.sum(
        (2/(1+np.exp(-np.dot(alpha,X[i])-beta))-1)
        * np.exp(-np.dot(alpha,X[i])-beta)
        / (1+np.exp(-np.dot(alpha,X[i])-beta))**2
        for i in range(num_instance))

def df_w(X,Y,Z,W,alpha,beta,gamma): #df/dsigma * dsigma/dw
    return np.sum(
        np.sum(
            (-3)*(Y[i][t]**2-(-np.log(1+np.exp(-np.dot(alpha,X[i])-beta)))*(2*Y[i][t]-1))
            * (1/(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t])))**4)
            * (1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t])))
            * (1-(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t]))))*X[i]
            + (1/(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t])))**2)
            * (1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t])))
            * (1-(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t]))))*X[i]
            for t in range(num_labeler))
        for i in range(num_instance))

def df_gamma(X,Y,Z,W,alpha,beta,gamma): #df/dsigma * dsigma/dgamma
    return np.sum(
        np.sum(
            (-3)*(Y[i][t]**2-(-np.log(1+np.exp(-np.dot(alpha,X[i])-beta)))*(2*Y[i][t]-1))
            * (1/(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t])))**4)
            * (1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t])))
            * (1-(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t]))))
            + (1/(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t])))**2)
            * (1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t])))
            * (1-(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t]))))
            for t in range(num_labeler))
        for i in range(num_instance))

def func(para, *args):
    #the function to minimize
    #parameters
    alpha = para[0]#alpha should be an array
    beta = para[1]
    W = para[2]
    gamma = para[3]
    #args
    X = args[0]
    Y = args[1]
    Z = args[2]        
    return  np.sum(np.sum(log_p_y_xz(Y[i][t], Z[i], sigma_eta_ti(X[i],W[t],gamma[t]))+log_p_z_x(alpha, beta, X[i]) for t in range(num_labeler)) for i in range (num_instance))


def func_grad(para, *args):
    #para have 4 values
    alpha = para[0]#alpha should be an array
    beta = para[1]
    W = para[2]
    gamma = para[3]
    #args
    X = args[0]
    Y = args[1]
    Z = args[2]
    #gradients
    d_f_a = df_alpha(X,Y,Z,W,alpha,beta,gamma)
    d_f_b = df_beta(X,Y,Z,W,alpha,beta,gamma)
    d_f_w = df_w(X,Y,Z,W,alpha,beta,gamma)
    d_f_g = df_gamma(X,Y,Z,W,alpha,beta,gamma)
    return np.array([d_f_a, d_f_b, d_f_w, d_f_g])


optimLogitLBFGS = sp.optimize.fmin_l_bfgs_b(func, x0=np.array(alpha,beta,W,gamma), fprime=func_grad)

The scipy optimization routines can only optimize over a one-dimensional vector of parameters. It looks like you are trying to optimize over a tuple of values that mixes scalars, vectors, and matrices.

What you want to do is flatten all the relevant parameter values into a single 1D array, and have func unpack and reshape them appropriately.
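
For reference, both tracebacks come from NumPy itself, not from the optimizer: np.array accepts at most two positional arguments (the data and the dtype), and a tuple whose elements have mismatched shapes cannot be packed into a single rectangular float array. A minimal reproduction:

import numpy as np

alpha = np.ones(4)     # shape (4,)
beta = 1               # scalar
W = np.ones((3, 4))    # shape (3, 4)
gamma = np.ones(5)     # shape (5,)

# Four positional arguments, but np.array only takes (object, dtype) positionally:
# np.array(alpha, beta, W, gamma)
#   -> ValueError: only 2 non-keyword arguments accepted

# A single tuple, but its elements have incompatible shapes, so no one
# rectangular float array can hold them:
# np.array((alpha, beta, W, gamma), dtype=float)
#   -> ValueError: setting an array element with a sequence.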


Edit: I would create a convenience function to extract your parameters, for example:

def get_params(para):
    # extract parameters from 1D parameter vector
    assert len(para) == 22
    alpha = para[0:4]               # 4 values
    beta = para[4]                  # 1 value
    W = para[5:17].reshape(3, 4)    # 12 values
    gamma = para[17:]               # 5 values
    return alpha, beta, gamma, W
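
As a quick sanity check of the round trip (pack_params here is a hypothetical helper that mirrors get_params, using the shapes above):

def pack_params(alpha, beta, W, gamma):
    # hypothetical inverse of get_params: flatten everything into one 1D vector
    return np.concatenate([np.ravel(alpha), np.ravel(beta),
                           np.ravel(W), np.ravel(gamma)])

p = pack_params(np.ones(4), 1.0, np.ones((3, 4)), np.ones(5))
alpha, beta, gamma, W = get_params(p)
assert alpha.shape == (4,) and W.shape == (3, 4) and gamma.shape == (5,)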

def func(para, *args):
    #the function to minimize
    #parameters
    alpha, beta, gamma, W = get_params(para)

    #args
    X = args[0]
    Y = args[1]
    Z = args[2]        
    return  np.sum(np.sum(log_p_y_xz(Y[i][t], Z[i], sigma_eta_ti(X[i],W[t],gamma[t]))+log_p_z_x(alpha, beta, X[i]) for t in range(num_labeler)) for i in range (num_instance))


def func_grad(para, *args):
    #para have 4 values
    alpha, beta, gamma, W = get_params(para)

    #args
    X = args[0]
    Y = args[1]
    Z = args[2]
    #gradients
    d_f_a = df_alpha(X,Y,Z,W,alpha,beta,gamma)
    d_f_b = df_beta(X,Y,Z,W,alpha,beta,gamma)
    d_f_w = df_w(X,Y,Z,W,alpha,beta,gamma)
    d_f_g = df_gamma(X,Y,Z,W,alpha,beta,gamma)
    # note: fmin_l_bfgs_b expects the gradient to be a flat array of the same
    # length as x0, so these pieces should ultimately be concatenated in the
    # same order the parameters are packed below
    return np.array([d_f_a, d_f_b, d_f_w, d_f_g])


x0 = np.concatenate([np.ravel(alpha), np.ravel(beta), np.ravel(W), np.ravel(gamma)])
optimLogitLBFGS = sp.optimize.fmin_l_bfgs_b(func, x0=x0, fprime=func_grad)

This still will not run, because your func() and func_grad() expect extra arguments (X, Y, Z) that are not supplied in your snippet, but the change above fixes the specific problem you asked about.
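
For completeness, a sketch of how those extra arguments could be supplied: fmin_l_bfgs_b forwards its args tuple to both func and fprime, so the call would look something like this (the gradient-shape issue noted above would still need fixing before the optimizer can actually converge):

optimLogitLBFGS = sp.optimize.fmin_l_bfgs_b(func, x0=x0, fprime=func_grad,
                                            args=(X, Y, Z))
x_opt, f_opt, info = optimLogitLBFGS   # solution vector, objective value, diagnostics dict
alpha, beta, gamma, W = get_params(x_opt)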