confusion matrix values in the wrong box
I wrote a confusion-matrix routine to compare two lists of numbers, following the online documentation, and just when I thought I was getting good results I noticed that the values end up in odd positions. First, here is the code I used:
## Classification report and confusion matrix
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report, confusion_matrix, ConfusionMatrixDisplay

def evaluate_pred(y_true, y_pred):
    y_test = np.array(y_true)
    y_predict = np.array(y_pred)
    target_names = ['Empty', 'Human', 'Dog', 'Dog&Human']
    labels_names = [0, 1, 2, 3]
    print(classification_report(y_test, y_predict, labels=labels_names, target_names=target_names))
    # Normalized confusion matrix (this is the one whose values look misplaced)
    cm = confusion_matrix(y_test, y_predict, labels=labels_names, normalize='pred')
    # Raw-count confusion matrix for comparison
    cm2 = confusion_matrix(y_test, y_predict, labels=labels_names)
    disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=target_names)
    disp = disp.plot(cmap=plt.cm.Blues, values_format='g')
    disp2 = ConfusionMatrixDisplay(confusion_matrix=cm2, display_labels=target_names)
    disp2 = disp2.plot(cmap=plt.cm.Blues, values_format='g')
    plt.show()
After giving it the two lists (labels and predictions) I get the results below (the normalized matrix is the second one), but as you can see, each class's row should add up to that class's total, yet it is the column that does. I have tried different things but still can't fix it. I'm missing something and can't figure out what. Any help is much appreciated.
Does this help?
Full example:
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.utils.multiclass import unique_labels
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = np.repeat(np.arange(0,10),15)
class_names = np.array(['1', '2', '3', '4', '5','6','7','8','9','10'])
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(y_true, y_pred, classes,
                          normalize=False,
                          title=None,
                          cmap=plt.cm.Blues):
    """
    This function prints and plots the confusion matrix.
    Normalization can be applied by setting `normalize=True`.
    """
    if not title:
        if normalize:
            title = 'Normalized confusion matrix'
        else:
            title = 'Confusion matrix, without normalization'
    # Compute confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    # Only use the labels that appear in the data
    classes = classes[unique_labels(y_true, y_pred)]
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    fig, ax = plt.subplots()
    im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
    ax.figure.colorbar(im, ax=ax)
    # We want to show all ticks...
    ax.set(xticks=np.arange(cm.shape[1]),
           yticks=np.arange(cm.shape[0]),
           # ... and label them with the respective list entries
           xticklabels=classes, yticklabels=classes,
           title=title,
           ylabel='True label',
           xlabel='Predicted label')
    # Rotate the tick labels and set their alignment.
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")
    # Loop over data dimensions and create text annotations.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(j, i, format(cm[i, j], fmt),
                    ha="center", va="center",
                    color="white" if cm[i, j] > thresh else "black")
    fig.tight_layout()
    plt.xlim(-0.5, len(np.unique(y)) - 0.5)
    plt.ylim(len(np.unique(y)) - 0.5, -0.5)
    return ax

np.set_printoptions(precision=2)

# Plot non-normalized confusion matrix
plot_confusion_matrix(y_test, y_pred, classes=class_names,
                      title='Confusion matrix, without normalization')

# Plot normalized confusion matrix
plot_confusion_matrix(y_test, y_pred, classes=class_names, normalize=True,
                      title='Normalized confusion matrix')

plt.show()
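Note that the manual normalization in the example above divides each row of the count matrix by its row total, so every row (i.e. every true class) sums to 1. A minimal sketch with made-up counts, just to show the arithmetic:

import numpy as np

# Hypothetical 2x2 count matrix: rows = true labels, columns = predictions
cm = np.array([[8, 2],
               [1, 9]])

row_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print(row_normalized)              # [[0.8 0.2]
                                   #  [0.1 0.9]]
print(row_normalized.sum(axis=1))  # [1. 1.] -- each true class sums to 1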
I just needed to use normalize='true' instead of normalize='pred' to fix the problem. It seems that setting the value to 'pred' takes the sum of each column and computes the percentages from that.
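As a minimal sketch of the difference (made-up labels and predictions; the normalize argument of confusion_matrix requires scikit-learn 0.22 or newer):

import numpy as np
from sklearn.metrics import confusion_matrix

y_true = [0, 0, 1, 1, 2, 2]   # hypothetical ground-truth labels
y_pred = [0, 1, 1, 1, 2, 0]   # hypothetical predictions

# normalize='true': each row (true class) sums to 1
print(confusion_matrix(y_true, y_pred, normalize='true'))

# normalize='pred': each column (predicted class) sums to 1
print(confusion_matrix(y_true, y_pred, normalize='pred'))

In other words, with 'true' the diagonal shows per-class recall, while with 'pred' it shows per-class precision.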