MLR - 使用嵌套重采样的基准实验。如何访问内部重采样的调优结果?
MLR - Benchmark Experiment using nested resampling. How to access the inner resampling tuning results?
我正在对一项任务使用基准实验。我正在使用嵌套重采样策略 (https://mlr-org.github.io/mlr-tutorial/devel/html/nested_resampling/index.html)。
我使用内部重采样策略创建了一个学习器。例如,这里是 c50 的粗略版本:
### C50 ############################################################################################################################
# Base C5.0 learner predicting class probabilities.
classif_c50 <- makeLearner("classif.C50", predict.type = "prob")
# The wrappers are presented in reverse order of application.
# One-hot encoding of factor features.
classif_c50 <- makeDummyFeaturesWrapper(classif_c50, method = "1-of-n")
# Missing-data imputation: sentinel constants for numeric/integer columns,
# an explicit "==Missing==" level for factors, plus numeric dummy columns
# marking which numeric/integer values were imputed.
classif_c50 <- makeImputeWrapper(
  classif_c50,
  classes = list(
    numeric = imputeConstant(-99999),
    integer = imputeConstant(-99999),
    factor = imputeConstant("==Missing==")
  ),
  dummy.type = "numeric",
  dummy.classes = c("numeric", "integer")
)
##### Tuning #####
# Inner resampling (3-fold CV) and a small random search over C5.0's
# `trials` and `CF` hyperparameters.
inner_resamp <- makeResampleDesc("CV", iters = 3)
ctrl <- makeTuneControlRandom(maxit = 3L)
hypss <- makeParamSet(
  makeIntegerParam("trials", lower = 1, upper = 30),
  makeNumericParam("CF", lower = 0, upper = 1)
)
classif_c50 <- makeTuneWrapper(
  classif_c50,
  resampling = inner_resamp,
  par.set = hypss,
  control = ctrl,
  measures = list(auc, logloss, f1, ber, acc, bac, mmce, timetrain),
  show.info = TRUE
)
### C50 ############################################################################################################################
然后我使用如下外部重采样策略创建基准实验(bench_data 是我的 data.frame):
# Fixed holdout split used as the outer loop of the nested resampling.
outer_resampling <- makeFixedHoldoutInstance(train_indices, valid_indices, nrow(bench_data))
trainTask <- makeClassifTask(
  id = training_task_name,
  data = bench_data,
  target = target_feature,
  positive = 1,
  fixup.data = "warn",
  check.data = TRUE
)
# Outer-loop benchmark; hyperparameter tuning runs inside each learner's
# TuneWrapper (the inner loop).
res <- benchmark(
  tasks = trainTask,
  learners = lrns,
  resampling = outer_resampling,
  measures = list(auc, logloss, f1, ber, acc, bac, mmce, timetrain),
  show.info = TRUE
)
我找不到使用 getBMR* 系列函数提取内部重采样结果的方法。是否有我遗漏的方法?
编辑:可重现的例子
# Required packages ----
library(mlr)
# library(dplyr)
library(parallelMap)
library(parallel)
# Tuning budget: random-search iterations and inner-CV fold count.
iterations <- 10L
cv_iters <- 2L
### classif.gamboost ############################################################################################################################
# Base gamboost learner predicting class probabilities.
classif_gamboost <- makeLearner("classif.gamboost", predict.type = "prob")
# The wrappers are presented in reverse order of application.
# One-hot encoding of factor features.
classif_gamboost <- makeDummyFeaturesWrapper(classif_gamboost, method = "1-of-n")
# Missing-data imputation: sentinel constants plus numeric indicator dummies.
classif_gamboost <- makeImputeWrapper(
  classif_gamboost,
  classes = list(
    numeric = imputeConstant(-99999),
    integer = imputeConstant(-99999),
    factor = imputeConstant("==Missing==")
  ),
  dummy.type = "numeric",
  dummy.classes = c("numeric", "integer")
)
##### Tuning #####
# Inner CV and random search over the gamboost hyperparameters.
inner_resamp <- makeResampleDesc("CV", iters = cv_iters)
ctrl <- makeTuneControlRandom(maxit = iterations)
hypss <- makeParamSet(
  makeDiscreteParam("baselearner", values = c("btree")),  # also possible: "bols", "bbs"
  makeIntegerParam("dfbase", lower = 1, upper = 5),
  makeDiscreteParam("family", values = c("Binomial")),
  makeDiscreteParam("mstop", values = c(10, 50, 100, 250, 500, 1000))
)
classif_gamboost <- makeTuneWrapper(
  classif_gamboost,
  resampling = inner_resamp,
  par.set = hypss,
  control = ctrl,
  measures = list(auc, logloss, f1, ber, acc, bac, mmce, timetrain),
  show.info = TRUE
)
### classif.gamboost ############################################################################################################################
### Random Forest ############################################################################################################################
# Base random forest (randomForestSRC) learner predicting class probabilities.
classif_rforest <- makeLearner("classif.randomForestSRC", predict.type = "prob")
# The wrappers are presented in reverse order of application.
# One-hot encoding of factor features.
classif_rforest <- makeDummyFeaturesWrapper(classif_rforest, method = "1-of-n")
# Missing-data imputation: sentinel constants plus numeric indicator dummies.
classif_rforest <- makeImputeWrapper(
  classif_rforest,
  classes = list(
    numeric = imputeConstant(-99999),
    integer = imputeConstant(-99999),
    factor = imputeConstant("==Missing==")
  ),
  dummy.type = "numeric",
  dummy.classes = c("numeric", "integer")
)
##### Tuning #####
inner_resamp <- makeResampleDesc("CV", iters = cv_iters)
ctrl <- makeTuneControlRandom(maxit = iterations)
# NOTE(review): the mtry upper bound of 30 may exceed the number of features
# on small tasks (e.g. mtcars) — confirm against the encoded feature count.
hypss <- makeParamSet(
  makeIntegerParam("mtry", lower = 1, upper = 30),
  makeIntegerParam("ntree", lower = 100, upper = 500),
  makeIntegerParam("nodesize", lower = 1, upper = 100)
)
classif_rforest <- makeTuneWrapper(
  classif_rforest,
  resampling = inner_resamp,
  par.set = hypss,
  control = ctrl,
  measures = list(auc, logloss, f1, ber, acc, bac, mmce, timetrain),
  show.info = TRUE
)
### Random Forest ############################################################################################################################
# Reproducible example data: mtcars with `am` as a binary factor target.
trainData <- mtcars
target_feature <- "am"
training_task_name <- "trainingTask"
trainData[[target_feature]] <- as.factor(trainData[[target_feature]])
trainTask <- makeClassifTask(
  id = training_task_name,
  data = trainData,
  target = target_feature,
  positive = 1,
  fixup.data = "warn",
  check.data = TRUE
)
# Fixed holdout split used as the outer resampling loop.
train_indices <- 1:25
valid_indices <- 26:32
outer_resampling <- makeFixedHoldoutInstance(train_indices, valid_indices, nrow(trainData))
# Parallelize only the inner tuning loop ("mlr.tuneParams" level).
# NOTE(review): detectCores() can return NA and claims every core; consider
# max(1L, detectCores() - 1L) on shared machines.
no_of_cores <- detectCores()
parallelStartSocket(no_of_cores, level = "mlr.tuneParams", logging = TRUE)
lrns <- list(classif_gamboost, classif_rforest)
res <- benchmark(
  tasks = trainTask,
  learners = lrns,
  resampling = outer_resampling,
  measures = list(logloss, auc, f1, ber, acc, bac, mmce, timetrain),
  show.info = TRUE
)
parallelStop()
# Outer-loop performances per task/learner.
getBMRPerformances(res, as.df = TRUE)
这里有两种从基准对象中提取优化路径的方法:
通过获取基准调整结果:
# Tune results per task/learner extracted from the benchmark object.
z <- getBMRTuneResults(res)
然后遍历每个调优结果的优化路径,并使用 generateHyperParsEffectData 提取超参数效果:
# Hyperparameter-effect data for each learner's first outer iteration.
# (TRUE, not the reassignable alias T.)
lapply(z$trainingTask, function(x) generateHyperParsEffectData(x[[1]], partial.dep = TRUE))
或者只是为了获取数据:
# Same as above, but keep only the underlying data frame.
# (TRUE, not the reassignable alias T.)
lapply(z$trainingTask, function(x) generateHyperParsEffectData(x[[1]], partial.dep = TRUE)$data)
或者按照 @Giuseppe 在评论中的建议稍作修改:先获取 BMR 模型,再从中提取调优结果:
# Alternative: pull the fitted outer-loop models, then extract each
# learner's tune result from its first outer iteration.
models <- getBMRModels(res, drop = TRUE)
tune.result <- lapply(models, function(x) getTuneResult(x[[1]]))
# Full optimisation paths as data frames.
lapply(tune.result, function(x) as.data.frame(x$opt.path))
我正在对一项任务使用基准实验。我正在使用嵌套重采样策略 (https://mlr-org.github.io/mlr-tutorial/devel/html/nested_resampling/index.html)。 我使用内部重采样策略创建了一个学习器。例如,这里是 c50 的粗略版本:
### C50 ############################################################################################################################
# Base C5.0 learner predicting class probabilities.
classif_c50 <- makeLearner("classif.C50", predict.type = "prob")
# The wrappers are presented in reverse order of application.
# One-hot encoding of factor features.
classif_c50 <- makeDummyFeaturesWrapper(classif_c50, method = "1-of-n")
# Missing-data imputation: sentinel constants for numeric/integer columns,
# an explicit "==Missing==" level for factors, plus numeric dummy columns
# marking which numeric/integer values were imputed.
classif_c50 <- makeImputeWrapper(
  classif_c50,
  classes = list(
    numeric = imputeConstant(-99999),
    integer = imputeConstant(-99999),
    factor = imputeConstant("==Missing==")
  ),
  dummy.type = "numeric",
  dummy.classes = c("numeric", "integer")
)
##### Tuning #####
# Inner resampling (3-fold CV) and a small random search over C5.0's
# `trials` and `CF` hyperparameters.
inner_resamp <- makeResampleDesc("CV", iters = 3)
ctrl <- makeTuneControlRandom(maxit = 3L)
hypss <- makeParamSet(
  makeIntegerParam("trials", lower = 1, upper = 30),
  makeNumericParam("CF", lower = 0, upper = 1)
)
classif_c50 <- makeTuneWrapper(
  classif_c50,
  resampling = inner_resamp,
  par.set = hypss,
  control = ctrl,
  measures = list(auc, logloss, f1, ber, acc, bac, mmce, timetrain),
  show.info = TRUE
)
### C50 ############################################################################################################################
然后我使用如下外部重采样策略创建基准实验(bench_data 是我的 data.frame):
# Fixed holdout split used as the outer loop of the nested resampling.
outer_resampling <- makeFixedHoldoutInstance(train_indices, valid_indices, nrow(bench_data))
trainTask <- makeClassifTask(
  id = training_task_name,
  data = bench_data,
  target = target_feature,
  positive = 1,
  fixup.data = "warn",
  check.data = TRUE
)
# Outer-loop benchmark; hyperparameter tuning runs inside each learner's
# TuneWrapper (the inner loop).
res <- benchmark(
  tasks = trainTask,
  learners = lrns,
  resampling = outer_resampling,
  measures = list(auc, logloss, f1, ber, acc, bac, mmce, timetrain),
  show.info = TRUE
)
我找不到使用 getBMR* 系列函数提取内部重采样结果的方法。是否有我遗漏的方法?
编辑:可重现的例子
# Required packages ----
library(mlr)
# library(dplyr)
library(parallelMap)
library(parallel)
# Tuning budget: random-search iterations and inner-CV fold count.
iterations <- 10L
cv_iters <- 2L
### classif.gamboost ############################################################################################################################
# Base gamboost learner predicting class probabilities.
classif_gamboost <- makeLearner("classif.gamboost", predict.type = "prob")
# The wrappers are presented in reverse order of application.
# One-hot encoding of factor features.
classif_gamboost <- makeDummyFeaturesWrapper(classif_gamboost, method = "1-of-n")
# Missing-data imputation: sentinel constants plus numeric indicator dummies.
classif_gamboost <- makeImputeWrapper(
  classif_gamboost,
  classes = list(
    numeric = imputeConstant(-99999),
    integer = imputeConstant(-99999),
    factor = imputeConstant("==Missing==")
  ),
  dummy.type = "numeric",
  dummy.classes = c("numeric", "integer")
)
##### Tuning #####
# Inner CV and random search over the gamboost hyperparameters.
inner_resamp <- makeResampleDesc("CV", iters = cv_iters)
ctrl <- makeTuneControlRandom(maxit = iterations)
hypss <- makeParamSet(
  makeDiscreteParam("baselearner", values = c("btree")),  # also possible: "bols", "bbs"
  makeIntegerParam("dfbase", lower = 1, upper = 5),
  makeDiscreteParam("family", values = c("Binomial")),
  makeDiscreteParam("mstop", values = c(10, 50, 100, 250, 500, 1000))
)
classif_gamboost <- makeTuneWrapper(
  classif_gamboost,
  resampling = inner_resamp,
  par.set = hypss,
  control = ctrl,
  measures = list(auc, logloss, f1, ber, acc, bac, mmce, timetrain),
  show.info = TRUE
)
### classif.gamboost ############################################################################################################################
### Random Forest ############################################################################################################################
# Base random forest (randomForestSRC) learner predicting class probabilities.
classif_rforest <- makeLearner("classif.randomForestSRC", predict.type = "prob")
# The wrappers are presented in reverse order of application.
# One-hot encoding of factor features.
classif_rforest <- makeDummyFeaturesWrapper(classif_rforest, method = "1-of-n")
# Missing-data imputation: sentinel constants plus numeric indicator dummies.
classif_rforest <- makeImputeWrapper(
  classif_rforest,
  classes = list(
    numeric = imputeConstant(-99999),
    integer = imputeConstant(-99999),
    factor = imputeConstant("==Missing==")
  ),
  dummy.type = "numeric",
  dummy.classes = c("numeric", "integer")
)
##### Tuning #####
inner_resamp <- makeResampleDesc("CV", iters = cv_iters)
ctrl <- makeTuneControlRandom(maxit = iterations)
# NOTE(review): the mtry upper bound of 30 may exceed the number of features
# on small tasks (e.g. mtcars) — confirm against the encoded feature count.
hypss <- makeParamSet(
  makeIntegerParam("mtry", lower = 1, upper = 30),
  makeIntegerParam("ntree", lower = 100, upper = 500),
  makeIntegerParam("nodesize", lower = 1, upper = 100)
)
classif_rforest <- makeTuneWrapper(
  classif_rforest,
  resampling = inner_resamp,
  par.set = hypss,
  control = ctrl,
  measures = list(auc, logloss, f1, ber, acc, bac, mmce, timetrain),
  show.info = TRUE
)
### Random Forest ############################################################################################################################
# Reproducible example data: mtcars with `am` as a binary factor target.
trainData <- mtcars
target_feature <- "am"
training_task_name <- "trainingTask"
trainData[[target_feature]] <- as.factor(trainData[[target_feature]])
trainTask <- makeClassifTask(
  id = training_task_name,
  data = trainData,
  target = target_feature,
  positive = 1,
  fixup.data = "warn",
  check.data = TRUE
)
# Fixed holdout split used as the outer resampling loop.
train_indices <- 1:25
valid_indices <- 26:32
outer_resampling <- makeFixedHoldoutInstance(train_indices, valid_indices, nrow(trainData))
# Parallelize only the inner tuning loop ("mlr.tuneParams" level).
# NOTE(review): detectCores() can return NA and claims every core; consider
# max(1L, detectCores() - 1L) on shared machines.
no_of_cores <- detectCores()
parallelStartSocket(no_of_cores, level = "mlr.tuneParams", logging = TRUE)
lrns <- list(classif_gamboost, classif_rforest)
res <- benchmark(
  tasks = trainTask,
  learners = lrns,
  resampling = outer_resampling,
  measures = list(logloss, auc, f1, ber, acc, bac, mmce, timetrain),
  show.info = TRUE
)
parallelStop()
# Outer-loop performances per task/learner.
getBMRPerformances(res, as.df = TRUE)
这里有两种从基准对象中提取优化路径的方法:
通过获取基准调整结果:
# Tune results per task/learner extracted from the benchmark object.
z <- getBMRTuneResults(res)
然后遍历每个调优结果的优化路径,并使用 generateHyperParsEffectData 提取超参数效果:
# Hyperparameter-effect data for each learner's first outer iteration.
# (TRUE, not the reassignable alias T.)
lapply(z$trainingTask, function(x) generateHyperParsEffectData(x[[1]], partial.dep = TRUE))
或者只是为了获取数据:
# Same as above, but keep only the underlying data frame.
# (TRUE, not the reassignable alias T.)
lapply(z$trainingTask, function(x) generateHyperParsEffectData(x[[1]], partial.dep = TRUE)$data)
或者按照 @Giuseppe 在评论中的建议稍作修改:先获取 BMR 模型,再从中提取调优结果:
# Alternative: pull the fitted outer-loop models, then extract each
# learner's tune result from its first outer iteration.
models <- getBMRModels(res, drop = TRUE)
tune.result <- lapply(models, function(x) getTuneResult(x[[1]]))
# Full optimisation paths as data frames.
lapply(tune.result, function(x) as.data.frame(x$opt.path))