The graph of this ROC curve looks strange (sklearn SVC)
So I built a small example using scikit-learn's support vector classifier (svm.SVC) together with a pipeline and a grid search. After fitting and evaluating it I got an ROC curve that looks odd: it bends only once.
I expected the plot to have more of a curve shape. Can anyone explain this behavior? Minimal working example code:
# Imports
import sklearn as skl
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn import preprocessing
from sklearn import svm
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn import metrics
from tempfile import mkdtemp
from shutil import rmtree
from sklearn.externals.joblib import Memory
def plot_roc(y_test, y_pred):
    fpr, tpr, thresholds = skl.metrics.roc_curve(y_test, y_pred, pos_label=1)
    roc_auc = skl.metrics.auc(fpr, tpr)
    plt.figure()
    lw = 2
    plt.plot(fpr, tpr, color='darkorange', lw=lw, label='ROC curve (area ={0:.2f})'.format(roc_auc))
    plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic example')
    plt.legend(loc="lower right")
    plt.show()
# Generate a random dataset
X, y = skl.datasets.make_classification(n_samples=1400, n_features=11, n_informative=5, n_classes=2, weights=[0.94, 0.06], flip_y=0.05, random_state=42)
X_train, X_test, y_train, y_test = skl.model_selection.train_test_split(X, y, test_size=0.3, random_state=42)
#Instantiate Classifier
normer = preprocessing.Normalizer()
svm1 = svm.SVC(probability=True, class_weight={1: 10})
cached = mkdtemp()
memory = Memory(cachedir=cached, verbose=3)
pipe_1 = Pipeline(steps=[('normalization', normer), ('svm', svm1)], memory=memory)
cv = skl.model_selection.KFold(n_splits=5, shuffle=True, random_state=42)
param_grid = [ {"svm__kernel": ["linear"], "svm__C": [1, 10, 100, 1000]}, {"svm__kernel": ["rbf"], "svm__C": [1, 10, 100, 1000], "svm__gamma": [0.001, 0.0001]} ]
grd = GridSearchCV(pipe_1, param_grid, scoring='roc_auc', cv=cv)
#Training
y_pred = grd.fit(X_train, y_train).predict(X_test)
rmtree(cached)
#Evaluation
confmatrix = skl.metrics.confusion_matrix(y_test, y_pred)
print(confmatrix)
plot_roc(y_test, y_pred)
Your plot_roc(y_test, y_pred) function internally calls roc_curve.
According to the documentation of roc_curve:
y_score : array, shape = [n_samples]
    Target scores, can either be probability estimates of the positive class, confidence values, or non-thresholded measure of decisions (as returned by “decision_function” on some classifiers).
So the method works best when y_pred contains probability estimates of the positive class rather than hard predicted classes.
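With hard 0/1 predictions there are only two distinct score values, so roc_curve can place at most one threshold between them and returns just three points: (0, 0), the classifier's single operating point, and (1, 1). That is exactly the one-bend shape you saw. A minimal sketch on toy data (not the question's data) showing the difference:
import numpy as np
from sklearn.metrics import roc_curve

y_true   = np.array([0, 0, 0, 1, 1, 0, 1, 0])          # toy ground truth
y_hard   = np.array([0, 0, 1, 1, 1, 0, 0, 0])          # hard 0/1 predictions
y_scores = np.array([0.1, 0.2, 0.6, 0.8, 0.7, 0.3, 0.4, 0.05])  # probability-like scores

fpr_hard, tpr_hard, _ = roc_curve(y_true, y_hard, pos_label=1)
fpr_prob, tpr_prob, _ = roc_curve(y_true, y_scores, pos_label=1)

print(len(fpr_hard))   # 3 points -> a curve with a single bend
print(len(fpr_prob))   # more points -> the usual staircase shape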
Try the following code:
y_pred = grd.fit(X_train, y_train).predict_proba(X_test)[:,1]
and then pass that y_pred to the plot method.
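As the quoted documentation notes, non-thresholded decision values also work as scores. A small sketch, assuming the grd object from the question has already been fitted as in the script above (with SVC, decision_function returns signed margin distances, so this would work even without probability=True):
y_scores = grd.decision_function(X_test)   # signed distance to the separating hyperplane
plot_roc(y_test, y_scores)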