I am coding an algorithm for active learning, using the L-BFGS algorithm from scipy.optimize. I need to optimize four parameters: alpha, beta, W and gamma.
However, it does not work; it fails with this error:

optimLogitLBFGS = sp.optimize.fmin_l_bfgs_b(func, x0=np.array(alpha,beta,W,gamma), fprime=func_grad)
ValueError: only 2 non-keyword arguments accepted

Note that, in the last line of the code, x0 is the initial guess for the four parameters. If I change it to x0=np.array((alpha,beta,W,gamma),dtype=float), I get another error:

ValueError: setting an array element with a sequence.

I am not sure why these errors happen.
from sys import argv
import numpy as np
import scipy as sp
import pandas as pd
import scipy.stats as sps
num_labeler = 3
num_instance = 5
X = np.array([[1,1,1,1],[2,2,2,2],[3,3,3,3],[4,4,4,4],[5,5,5,5]])
Z = np.array([1,0,1,0,1])
Y = np.array([[1,0,1],[0,1,0],[0,0,0],[1,1,1],[1,0,0]])
W = np.array([[1,1,1,1],[2,2,2,2],[3,3,3,3]])
gamma = np.ones(5)
alpha = np.ones(4)
beta = 1
def log_p_y_xz(yit,zi,sigmati): #log P(y_it|x_i,z_i)
    return np.log(sps.norm(zi,sigmati).pdf(yit)) #tested

def log_p_z_x(alpha,beta,xi): #log P(z_i=1|x_i)
    return -np.log(1+np.exp(-np.dot(alpha,xi)-beta)) #tested

def sigma_eta_ti(xi, w_t, gamma_t): # (1+exp(-w_t x_i - gamma_t))^-1
    return 1/(1+np.exp(-np.dot(xi,w_t)-gamma_t)) #tested

def df_alpha(X,Y,Z,W,alpha,beta,gamma): #df/dalpha
    return np.sum((2/(1+np.exp(-np.dot(alpha,X[i])-beta))-1)*np.exp(-np.dot(alpha,X[i])-beta)*X[i]/(1+np.exp(-np.dot(alpha,X[i])-beta))**2 for i in range(num_instance))

def df_beta(X,Y,Z,W,alpha,beta,gamma): #df/dbeta
    return np.sum((2/(1+np.exp(-np.dot(alpha,X[i])-beta))-1)*np.exp(-np.dot(alpha,X[i])-beta)/(1+np.exp(-np.dot(alpha,X[i])-beta))**2 for i in range(num_instance))

def df_w(X,Y,Z,W,alpha,beta,gamma): #df/dsigma * dsigma/dw
    return np.sum(np.sum((-3)*(Y[i][t]**2-(-np.log(1+np.exp(-np.dot(alpha,X[i])-beta)))*(2*Y[i][t]-1))*(1/(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t])))**4)*(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t])))*(1-(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t]))))*X[i]+(1/(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t])))**2)*(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t])))*(1-(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t]))))*X[i] for t in range(num_labeler)) for i in range(num_instance))

def df_gamma(X,Y,Z,W,alpha,beta,gamma): #df/dsigma * dsigma/dgamma
    return np.sum(np.sum((-3)*(Y[i][t]**2-(-np.log(1+np.exp(-np.dot(alpha,X[i])-beta)))*(2*Y[i][t]-1))*(1/(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t])))**4)*(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t])))*(1-(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t]))))+(1/(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t])))**2)*(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t])))*(1-(1/(1+np.exp(-np.dot(X[i],W[t])-gamma[t])))) for t in range(num_labeler)) for i in range(num_instance))

def func(para, *args):
    #the function to minimize
    #parameters
    alpha = para[0] #alpha should be an array
    beta = para[1]
    W = para[2]
    gamma = para[3]
    #args
    X = args[0]
    Y = args[1]
    Z = args[2]
    return np.sum(np.sum(log_p_y_xz(Y[i][t], Z[i], sigma_eta_ti(X[i],W[t],gamma[t]))+log_p_z_x(alpha, beta, X[i]) for t in range(num_labeler)) for i in range(num_instance))

def func_grad(para, *args):
    #para has 4 values
    alpha = para[0] #alpha should be an array
    beta = para[1]
    W = para[2]
    gamma = para[3]
    #args
    X = args[0]
    Y = args[1]
    Z = args[2]
    #gradients
    d_f_a = df_alpha(X,Y,Z,W,alpha,beta,gamma)
    d_f_b = df_beta(X,Y,Z,W,alpha,beta,gamma)
    d_f_w = df_w(X,Y,Z,W,alpha,beta,gamma)
    d_f_g = df_gamma(X,Y,Z,W,alpha,beta,gamma)
    return np.array([d_f_a, d_f_b, d_f_w, d_f_g])

optimLogitLBFGS = sp.optimize.fmin_l_bfgs_b(func, x0=np.array(alpha,beta,W,gamma), fprime=func_grad)
The scipy optimization routines can only optimize a one-dimensional vector of parameters. It looks like you're trying to optimize a tuple of values which contain a mix of scalars, vectors, and matrices.
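To see why both attempts fail (just an illustration, using the shapes from your snippet: alpha has 4 entries, beta is a scalar, W is 3x4, gamma has 5 entries): np.array accepts only the data and an optional dtype as positional arguments, which is what the first ValueError complains about, and once the parameters are wrapped in a tuple their shapes are ragged, so NumPy cannot build a single rectangular array from them:

np.array(alpha, beta, W, gamma)                 # ValueError: only 2 non-keyword arguments accepted
np.array((alpha, beta, W, gamma), dtype=float)  # ValueError: setting an array element with a sequence.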
What you'll want to do is flatten all the relevant parameter values into a single one-dimensional array, and have your func unpack and use them appropriately.
Edit: I'd proceed by creating a convenience function which extracts your parameters; for example:
def get_params(para):
    # extract parameters from 1D parameter vector
    # alpha (4) + beta (1) + W (3*4 = 12) + gamma (5) = 22 entries
    assert len(para) == 22
    alpha = para[0:4]
    beta = para[4]
    W = para[5:17].reshape(3, 4)
    gamma = para[17:]
    return alpha, beta, gamma, W

def func(para, *args):
    #the function to minimize
    #parameters
    alpha, beta, gamma, W = get_params(para)
    #args
    X = args[0]
    Y = args[1]
    Z = args[2]
    return np.sum(np.sum(log_p_y_xz(Y[i][t], Z[i], sigma_eta_ti(X[i],W[t],gamma[t]))+log_p_z_x(alpha, beta, X[i]) for t in range(num_labeler)) for i in range(num_instance))

def func_grad(para, *args):
    #para holds all 4 parameter blocks
    alpha, beta, gamma, W = get_params(para)
    #args
    X = args[0]
    Y = args[1]
    Z = args[2]
    #gradients
    d_f_a = df_alpha(X,Y,Z,W,alpha,beta,gamma)
    d_f_b = df_beta(X,Y,Z,W,alpha,beta,gamma)
    d_f_w = df_w(X,Y,Z,W,alpha,beta,gamma)
    d_f_g = df_gamma(X,Y,Z,W,alpha,beta,gamma)
    return np.array([d_f_a, d_f_b, d_f_w, d_f_g])

x0 = np.concatenate([np.ravel(alpha), np.ravel(beta), np.ravel(W), np.ravel(gamma)])
optimLogitLBFGS = sp.optimize.fmin_l_bfgs_b(func, x0=x0, fprime=func_grad)
It doesn't run as-is because your func() and func_grad() require additional arguments which are not specified in your code snippet, but this change solves the specific problem you were asking about.
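As a final sketch (this assumes you intend to pass X, Y and Z through the args keyword, which fmin_l_bfgs_b forwards to both func and fprime, and that each df_* function returns a gradient with the same shape as its corresponding parameter): fprime must return a flat array with one entry per element of x0, so the gradient blocks should be concatenated in the same order as x0 rather than wrapped in np.array:

def func_grad(para, *args):
    alpha, beta, gamma, W = get_params(para)
    X, Y, Z = args
    # flatten each gradient block and concatenate in the same order as x0,
    # so the result has one entry per element of the 22-element parameter vector
    return np.concatenate([
        np.ravel(df_alpha(X, Y, Z, W, alpha, beta, gamma)),
        np.ravel(df_beta(X, Y, Z, W, alpha, beta, gamma)),
        np.ravel(df_w(X, Y, Z, W, alpha, beta, gamma)),
        np.ravel(df_gamma(X, Y, Z, W, alpha, beta, gamma)),
    ])

optimLogitLBFGS = sp.optimize.fmin_l_bfgs_b(func, x0=x0, fprime=func_grad, args=(X, Y, Z))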