How can I check a confusion_matrix after fine-tuning with custom datasets?
This question is the same as "How can I check a confusion_matrix after fine-tuning with custom datasets?" on Data Science Stack Exchange.
Background
I would like to check a confusion_matrix, including precision, recall, and f1-score, like the one below, after fine-tuning with custom datasets.
The fine-tuning process and task follow Sequence Classification with IMDb Reviews in the Fine-tuning with custom datasets tutorial on Hugging Face.
After fine-tuning is done with Trainer, how can I check a confusion_matrix in this case?
An image of a confusion_matrix, including precision, recall and f1-score, from the original site (shown only as an example of the output):
predictions = np.argmax(trainer.test(test_x), axis=1)
# Confusion matrix and classification report.
print(classification_report(test_y, predictions))
precision recall f1-score support
0 0.75 0.79 0.77 1000
1 0.81 0.87 0.84 1000
2 0.63 0.61 0.62 1000
3 0.55 0.47 0.50 1000
4 0.66 0.66 0.66 1000
5 0.62 0.64 0.63 1000
6 0.74 0.83 0.78 1000
7 0.80 0.74 0.77 1000
8 0.85 0.81 0.83 1000
9 0.79 0.80 0.80 1000
avg / total 0.72 0.72 0.72 10000
Code
from transformers import DistilBertForSequenceClassification, Trainer, TrainingArguments
training_args = TrainingArguments(
    output_dir='./results',          # output directory
    num_train_epochs=3,              # total number of training epochs
    per_device_train_batch_size=16,  # batch size per device during training
    per_device_eval_batch_size=64,   # batch size for evaluation
    warmup_steps=500,                # number of warmup steps for learning rate scheduler
    weight_decay=0.01,               # strength of weight decay
    logging_dir='./logs',            # directory for storing logs
    logging_steps=10,
)
model = DistilBertForSequenceClassification.from_pretrained("distilbert-base-uncased")
trainer = Trainer(
    model=model,                 # the instantiated Transformers model to be trained
    args=training_args,          # training arguments, defined above
    train_dataset=train_dataset, # training dataset
    eval_dataset=val_dataset     # evaluation dataset
)
trainer.train()
What I have done so far
Prepared the dataset for Sequence Classification with IMDb Reviews and am fine-tuning with Trainer:
from pathlib import Path
def read_imdb_split(split_dir):
    split_dir = Path(split_dir)
    texts = []
    labels = []
    for label_dir in ["pos", "neg"]:
        for text_file in (split_dir/label_dir).iterdir():
            texts.append(text_file.read_text())
            labels.append(0 if label_dir == "neg" else 1)  # compare strings with ==, not `is`
    return texts, labels
train_texts, train_labels = read_imdb_split('aclImdb/train')
test_texts, test_labels = read_imdb_split('aclImdb/test')
from sklearn.model_selection import train_test_split
train_texts, val_texts, train_labels, val_labels = train_test_split(train_texts, train_labels, test_size=.2)
from transformers import DistilBertTokenizerFast
tokenizer = DistilBertTokenizerFast.from_pretrained('distilbert-base-uncased')
train_encodings = tokenizer(train_texts, truncation=True, padding=True)
val_encodings = tokenizer(val_texts, truncation=True, padding=True)
test_encodings = tokenizer(test_texts, truncation=True, padding=True)
import torch
class IMDbDataset(torch.utils.data.Dataset):
    def __init__(self, encodings, labels):
        self.encodings = encodings
        self.labels = labels

    def __getitem__(self, idx):
        item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
        item['labels'] = torch.tensor(self.labels[idx])
        return item

    def __len__(self):
        return len(self.labels)
train_dataset = IMDbDataset(train_encodings, train_labels)
val_dataset = IMDbDataset(val_encodings, val_labels)
test_dataset = IMDbDataset(test_encodings, test_labels)
What you can do in this case is to iterate over the validation set (or the test set) and manually build the lists of y_true and y_pred.
import torch
import torch.nn.functional as F
from sklearn import metrics

y_preds = []
y_trues = []
model.eval()
for index, val_text in enumerate(val_texts):
    # Tokenize a single review and move the tensors to the model's device.
    tokenized_val_text = tokenizer([val_text],
                                   truncation=True,
                                   padding=True,
                                   return_tensors='pt').to(model.device)
    with torch.no_grad():
        logits = model(**tokenized_val_text).logits
    prediction = F.softmax(logits, dim=1)
    y_pred = torch.argmax(prediction, dim=1).item()
    y_true = val_labels[index]
    y_preds.append(y_pred)
    y_trues.append(y_true)
Finally,
confusion_matrix = metrics.confusion_matrix(y_trues, y_preds, labels=[0, 1])  # 0 = neg, 1 = pos
print(confusion_matrix)
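Since the goal is also to see precision, recall and f1-score, you can print a classification report from the same lists. A minimal sketch, assuming the y_trues and y_preds built in the loop above:
from sklearn.metrics import classification_report

# Per-class precision, recall and f1-score; target_names only relabels 0/1 as neg/pos for readability.
print(classification_report(y_trues, y_preds, target_names=["neg", "pos"]))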
Observations:
- The output of the model is logits, not normalized probabilities.
- Therefore, we apply softmax over the first dimension to convert them into actual probabilities (e.g. 0.2 for class 0, 0.8 for class 1).
- We apply .argmax() to get the index of the predicted class.
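As an alternative to the manual loop, the Trainer itself can run prediction over a whole dataset in batches. A sketch, assuming the trainer and test_dataset objects defined above; Trainer.predict returns the logits together with the true labels:
import numpy as np
from sklearn.metrics import confusion_matrix, classification_report

output = trainer.predict(test_dataset)           # PredictionOutput(predictions, label_ids, metrics)
y_pred = np.argmax(output.predictions, axis=1)   # logits -> predicted class indices
y_true = output.label_ids

print(confusion_matrix(y_true, y_pred, labels=[0, 1]))                     # 0 = neg, 1 = pos
print(classification_report(y_true, y_pred, target_names=["neg", "pos"]))

This avoids tokenizing one review at a time and uses the evaluation batch size set in per_device_eval_batch_size.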