How to pass multiple lists of distributions to sklearn RandomizedSearchCV
I have a custom estimator object in Python (mkl_regressor). One of the learned parameters of this object is a numpy.array of floats. Usually an sklearn estimator is tuned through scalar parameters, e.g. the C of an SVM, so a RandomizedSearchCV search object takes a distribution or a list of values and picks values for the desired parameter from the given distribution (scipy.stats.expon in my case). I tried passing a list of distributions, without success, because RandomizedSearchCV does not sample from the elements of an array of distributions. Here is what I tried:
from modshogun import *
import Gnuplot, Gnuplot.funcutils
from numpy import *
from sklearn.metrics import r2_score

class mkl_regressor():

    def __init__(self, widths = [0.01, 0.1, 1.0, 10.0, 50.0, 100.0], kernel_weights = [0.01, 0.1, 1.0,], svm_c = 0.01, mkl_c = 1.0, svm_norm = 1, mkl_norm = 1, degree = 2):
        self.svm_c = svm_c
        self.mkl_c = mkl_c
        self.svm_norm = svm_norm
        self.mkl_norm = mkl_norm
        self.degree = degree
        self.widths = widths
        self.kernel_weights = kernel_weights

    def fit(self, X, y, **params):
        for parameter, value in params.items():
            setattr(self, parameter, value)
        self.feats_train = RealFeatures(X.T)
        labels_train = RegressionLabels(y.reshape((len(y), )))
        self._kernels_ = CombinedKernel()
        for width in self.widths:
            kernel = GaussianKernel()
            kernel.set_width(width)
            kernel.init(self.feats_train, self.feats_train)
            self._kernels_.append_kernel(kernel)
            del kernel
        kernel = PolyKernel(10, self.degree)
        self._kernels_.append_kernel(kernel)
        del kernel
        self._kernels_.init(self.feats_train, self.feats_train)
        binary_svm_solver = SVRLight()
        self.mkl = MKLRegression(binary_svm_solver)
        self.mkl.set_C(self.svm_c, self.svm_c)
        self.mkl.set_C_mkl(self.mkl_c)
        self.mkl.set_mkl_norm(self.mkl_norm)
        self.mkl.set_mkl_block_norm(self.svm_norm)
        self.mkl.set_kernel(self._kernels_)
        self.mkl.set_labels(labels_train)
        self.mkl.train()
        self.kernel_weights = self._kernels_.get_subkernel_weights()

    def predict(self, X):
        self.feats_test = RealFeatures(X.T)
        self._kernels_.init(self.feats_train, self.feats_test)
        self.mkl.set_kernel(self._kernels_)
        return self.mkl.apply_regression().get_labels()

    def set_params(self, **params):
        for parameter, value in params.items():
            setattr(self, parameter, value)
        return self

    def get_params(self, deep=False):
        return {param: getattr(self, param) for param in dir(self) if not param.startswith('__') and not callable(getattr(self, param))}

    def score(self, X_t, y_t):
        predicted = self.predict(X_t)
        return r2_score(predicted, y_t)


if __name__ == "__main__":
    from sklearn.grid_search import RandomizedSearchCV as RS
    from scipy.stats import randint as sp_randint
    from scipy.stats import expon

    labels = array([2.0,0.0,2.0,1.0,3.0,2.0])
    labels = labels.reshape((len(labels), 1))
    data = array([[1.0,2.0,3.0],[1.0,2.0,9.0],[1.0,2.0,3.0],[1.0,2.0,0.0],[0.0,2.0,3.0],[1.0,2.0,3.0]])
    labels_t = array([1.,3.,4])
    labels_t = labels_t.reshape((len(labels_t), 1))
    data_t = array([[20.0,30.0,40.0],[10.0,20.0,30.0],[10.0,20.0,40.0]])
    k = 3

    param_grid = [ {'svm_c': expon(scale=100, loc=5),
                    'mkl_c': expon(scale=100, loc=5),
                    'degree': sp_randint(0, 32),
                    #'widths': [array([4.0,6.0,8.9,3.0]), array([4.0,6.0,8.9,3.0,2.0, 3.0, 4.0]), array( [100.0, 200.0, 300.0, 400.0])
                    'widths': [[expon, expon]]
                    }]

    mkl = mkl_regressor()
    rs = RS(mkl, param_distributions = param_grid[0], n_iter = 10, n_jobs = 24, cv = k)  #, scoring="r2", verbose=True)
    rs.fit(data, labels)
    preds = rs.predict(data_t)

    print "R^2: ", rs.score(data_t, labels_t)
    print "Parameters: ", rs.best_params_
The code above works well when numpy arrays are passed as the elements of the 'widths' entry of the parameter dictionary. However, when I try to pass a list of distributions instead, the RandomizedSearchCV object does not respond as expected:
/home/ignacio/distributionalSemanticStabilityThesis/mkl_test.py in fit(self=<__main__.mkl_regressor instance>, X=array([[ 1.,  2.,  3.],
       [ 1.,  2.,  0.],
       [ 0.,  2.,  3.],
       [ 1.,  2.,  3.]]), y=array([[ 2.],
       [ 1.],
       [ 3.],
       [ 2.]]), **params={})
     24         self.feats_train = RealFeatures(X.T)
     25         labels_train = RegressionLabels(y.reshape((len(y), )))
     26         self._kernels_ = CombinedKernel()
     27         for width in self.widths:
     28             kernel = GaussianKernel()
---> 29             kernel.set_width(width)
        kernel.set_width = <built-in method set_width of GaussianKernel object>
        width = <scipy.stats._continuous_distns.expon_gen object>
     30             kernel.init(self.feats_train,self.feats_train)
     31             self._kernels_.append_kernel(kernel)
     32             del kernel
     33
TypeError: in method 'GaussianKernel_set_width', argument 2 of type 'float64_t'
I don't want to force the estimator to evaluate each distribution generator itself, because in that case RandomizedSearchCV would have no control over the values being used.
Any suggestions? Thanks.
RandomizedSearchCV can take either a list of parameter values to try, or a distribution object with an rvs method to sample from. If you pass it a list, it assumes you passed a discrete set of parameter values to sample from. It does not support a list of distributions for a single parameter. If none of the existing distributions fits your needs, write a custom one.
If you need a distribution that returns an array, just create a class with an rvs() method that returns a random sample, and pass an instance of that class instead of a list of univariate distributions.
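For illustration, here is a minimal sketch (not part of the original answer) of the two accepted forms, using sklearn's ParameterSampler, which applies the same sampling logic RandomizedSearchCV uses internally. The parameter names are just the ones from the question, and the import path matches the old sklearn.grid_search module used above (newer releases moved it to sklearn.model_selection):

from scipy.stats import expon
from sklearn.grid_search import ParameterSampler

# Each entry is either a distribution-like object (anything exposing .rvs())
# or a plain list of candidate values.
param_distributions = {
    'svm_c': expon(scale=100, loc=5),   # distribution: sampled by calling .rvs()
    'degree': [1, 2, 3, 4],             # list: one element is picked at random
}

for sampled in ParameterSampler(param_distributions, n_iter=3):
    print sampled   # e.g. {'svm_c': 42.7, 'degree': 3}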
The solution suggested by @bpachev worked for me. The distribution class:
from scipy import stats
from scipy.stats import expon, randint

class expon_vector(stats.rv_continuous):

    def __init__(self, loc = 1.0, scale = 50.0, min_size = 2, max_size = 10):
        self.loc = loc
        self.scale = scale
        self.min_size = min_size
        self.max_size = max_size
        self.size = max_size - min_size  # Only for initialization

    def rvs(self):
        # Draw a random vector length, then return that many exponential samples.
        self.size = randint.rvs(low = self.min_size,
                                high = self.max_size, size = 1)
        return expon.rvs(loc = self.loc, scale = self.scale, size = self.size)
It is included in the parameter dictionary passed to the custom estimator I'm using:
param_grid = [ {'svm_c': expon(scale=100, loc=5),
                'mkl_c': expon(scale=100, loc=5),
                'degree': sp_randint(0, 24),
                'widths': expon_vector(loc = 0.1, scale = 100.0,
                                       min_size = 2, max_size = 10) } ]
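For completeness, a minimal usage sketch, reusing the mkl_regressor, data and labels defined in the question, so that the custom distribution is drawn from on every search iteration:

from sklearn.grid_search import RandomizedSearchCV as RS

mkl = mkl_regressor()
rs = RS(mkl, param_distributions = param_grid[0], n_iter = 10, cv = 3)
rs.fit(data, labels)   # each candidate gets a freshly sampled width array via expon_vector.rvs()

print "Parameters: ", rs.best_params_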