Something wrong when implementing SVM One-vs-all in python
I was trying to verify that I correctly understood how SVM-OVA (One-vs-All) works by comparing the function OneVsRestClassifier with my own implementation.
In the code below, I train num_classes classifiers in the training phase, then score all of them on the test set and select the one returning the highest probability value.
import pandas as pd
import numpy as np
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score,classification_report
from sklearn.preprocessing import scale
# Read dataset
df = pd.read_csv('In/winequality-white.csv', delimiter=';')
X = df.loc[:, df.columns != 'quality']
Y = df.loc[:, df.columns == 'quality']
my_classes = np.unique(Y)
num_classes = len(my_classes)
# Train-test split
np.random.seed(42)
msk = np.random.rand(len(df)) <= 0.8
train = df[msk]
test = df[~msk]
# From dataset to features and labels
X_train = train.loc[:, train.columns != 'quality']
Y_train = train.loc[:, train.columns == 'quality']
X_test = test.loc[:, test.columns != 'quality']
Y_test = test.loc[:, test.columns == 'quality']
# Models
clf = [None] * num_classes
for k in np.arange(0, num_classes):
    my_model = SVC(gamma='auto', C=1000, kernel='rbf', class_weight='balanced', probability=True)
    clf[k] = my_model.fit(X_train, Y_train == my_classes[k])
# Prediction
prob_table = np.zeros((len(Y_test), num_classes))
for k in np.arange(0, num_classes):
    p = clf[k].predict_proba(X_test)
    prob_table[:, k] = p[:, list(clf[k].classes_).index(True)]
Y_pred = prob_table.argmax(axis=1)
print("Test accuracy = ", accuracy_score( Y_test, Y_pred) * 100,"\n\n")
The test accuracy is equal to 0.21, while when using the function OneVsRestClassifier it returns 0.59. For completeness, I also report the other code (the preprocessing steps are the same as above):
....
clf = OneVsRestClassifier(SVC(gamma='auto', C=1000, kernel='rbf', class_weight='balanced'))
clf.fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
print("Test accuracy = ", accuracy_score( Y_test, Y_pred) * 100,"\n\n")
Is there something wrong in my own implementation of SVM-OVA?
You have unique classes array([3, 4, 5, 6, 7, 8, 9]), but the line Y_pred = prob_table.argmax(axis=1) assumes they are 0-indexed.
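For example (a minimal illustration with made-up probabilities), argmax returns the winning column's position, which has to be mapped back through my_classes to get an actual label:
import numpy as np

my_classes = np.array([3, 4, 5, 6, 7, 8, 9])
prob_row = np.array([0.10, 0.10, 0.60, 0.10, 0.05, 0.03, 0.02])
print(prob_row.argmax())               # 2 -- a column position, never a valid quality label
print(my_classes[prob_row.argmax()])   # 5 -- the actual predicted class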
Try refactoring your code to make it less prone to errors from assumptions like that:
import pandas as pd
import numpy as np
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, classification_report
from sklearn.model_selection import train_test_split

df = pd.read_csv('winequality-white.csv', delimiter=';')
y = df["quality"]
my_classes = np.unique(y)
X = df.drop("quality", axis=1)
X_train, X_test, Y_train, Y_test = train_test_split(X, y, random_state=42)

# Models: one binary classifier per class
clfs = []
for k in my_classes:
    my_model = SVC(gamma='auto', C=1000, kernel='rbf', class_weight='balanced',
                   probability=True, random_state=42)
    clfs.append(my_model.fit(X_train, Y_train == k))

# Prediction: take the class whose binary classifier is most confident
prob_table = np.zeros((len(X_test), len(my_classes)))
for i, clf in enumerate(clfs):
    prob_table[:, i] = clf.predict_proba(X_test)[:, 1]
Y_pred = my_classes[prob_table.argmax(1)]
print("Test accuracy = ", accuracy_score(Y_test, Y_pred) * 100)
from sklearn.multiclass import OneVsRestClassifier

clf = OneVsRestClassifier(SVC(gamma='auto', C=1000, kernel='rbf',
                              class_weight='balanced', random_state=42))
clf.fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
print("Test accuracy = ", accuracy_score(Y_test, Y_pred) * 100)
Test accuracy = 61.795918367346935
Test accuracy = 58.93877551020408
Note the difference between the probability-based OVR and the label-based OVR: the former is more fine-grained and gives the better result here.
For further experiments, you may want to wrap the classifier into a reusable class:
import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin

class OVRBinomial(BaseEstimator, ClassifierMixin):

    def __init__(self, cls):
        self.cls = cls

    def fit(self, X, y, **kwargs):
        # One binary (one-vs-rest) classifier per unique class
        self.classes_ = np.unique(y)
        self.clfs_ = []
        for c in self.classes_:
            clf = self.cls(**kwargs)
            clf.fit(X, y == c)
            self.clfs_.append(clf)
        return self

    def predict(self, X, **kwargs):
        # Pick the class whose binary classifier reports the highest probability
        probs = np.zeros((len(X), len(self.classes_)))
        for i, c in enumerate(self.classes_):
            prob = self.clfs_[i].predict_proba(X, **kwargs)[:, 1]
            probs[:, i] = prob
        idx_max = np.argmax(probs, 1)
        return self.classes_[idx_max]
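A minimal usage sketch, reusing the X_train/Y_train split from above (note that with this wrapper the estimator's constructor arguments are passed through fit as keyword arguments):
ovr = OVRBinomial(SVC)
ovr.fit(X_train, Y_train, gamma='auto', C=1000, kernel='rbf',
        class_weight='balanced', probability=True, random_state=42)
Y_pred = ovr.predict(X_test)
print("Test accuracy = ", accuracy_score(Y_test, Y_pred) * 100)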
Your code is wrong in the prediction part. With the command Y_pred = prob_table.argmax(axis=1) you get the index of the column with the highest probability, but you want the class with the highest probability, not the column index:
Y_pred = my_classes[prob_table.argmax(axis=1)]
The basis of one-vs-rest is to predict the probability of the "one" class (ignoring the probability of the "rest" class) and then take the highest probability. pandas can do this with .idxmax, which returns the name of the column with the highest probability.
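A quick illustration of .idxmax with toy probabilities (the integer column labels stand in for class numbers):
import pandas

probs = pandas.DataFrame({3: [0.2, 0.7], 4: [0.5, 0.1], 5: [0.3, 0.2]})
print(probs.idxmax(axis=1))  # row 0 -> 4, row 1 -> 3 (column labels, not positions)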
This should work:
import pandas
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
from sklearn.datasets import load_wine
from sklearn.model_selection import train_test_split
from sklearn.multiclass import OneVsRestClassifier
# Read/load dataset
dataset = load_wine()
X = dataset["data"]
y = dataset["target"]
classes = {
    key: value
    for key, value in zip(range(len(dataset["target_names"])), dataset["target_names"])
}
# Create a train/test split (training set is 80% of the data, make sure the different classes are balanced across train and test)
X_train, X_test, y_train, y_test = train_test_split(
    X, y, train_size=0.8, random_state=43, shuffle=True, stratify=y
)
# Create a set of models
estimators = {}
for class_number, class_name in classes.items():
    # Create a model
    estimator = SVC(
        gamma="auto", C=1000, kernel="rbf", class_weight="balanced", probability=True
    )
    # Fit the model, make sure y is 1 if the class is the target for this estimator, otherwise (rest) 0
    estimator = estimator.fit(
        X_train, [1 if element == class_number else 0 for element in y_train]
    )
    # Store the trained model
    estimators[class_number] = estimator
# Make predictions
prediction_probabilities = {}
for class_number, estimator in estimators.items():
    # Every estimator predicts the probability for their target class
    prediction_probabilities[class_number] = estimator.predict_proba(X_test)[:, 1]
# Combine the probabilities into a dataframe
prediction_probabilities_df = pandas.DataFrame(prediction_probabilities)
# The prediction for each row is the column with the highest probability
y_pred = prediction_probabilities_df.idxmax(axis=1)
# Calculate the test accuracy
accuracy = accuracy_score(y_test, y_pred) * 100
print(f"Test accuracy (custom OneVsRest): {accuracy}")
# Create the model
clf = OneVsRestClassifier(
    SVC(gamma="auto", C=1000, kernel="rbf", class_weight="balanced")
)
clf.fit(X_train, y_train)
# Make predictions
y_pred = clf.predict(X_test)
# Calculate the test accuracy
accuracy = accuracy_score(y_test, y_pred) * 100
print(f"Test accuracy (Scikit-Learn OneVsRest): {accuracy}")
Output:
Test accuracy (custom OneVsRest): 47.22222222222222
Test accuracy (Scikit-Learn OneVsRest): 41.66666666666667