Pytorch - skip calculating features of pretrained models for every epoch
I'm used to TensorFlow/Keras, but I've had to switch to PyTorch for flexibility reasons. However, I can't seem to find PyTorch code that trains only the classification layers of a model. Isn't that common practice? Right now I have to wait for the feature extraction to be recomputed on the same data every epoch. Is there a way to avoid this?
# in tensorflow - keras:
import joblib
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Flatten, Dense
from tensorflow.keras.applications import MobileNetV2, mobilenet_v2
# Load a pre-trained model without its classification head
pretrained_nn = MobileNetV2(weights='imagenet', include_top=False, input_shape=(Image_size, Image_size, 3))
# Extract features of the training data only once
X = mobilenet_v2.preprocess_input(X)
features_x = pretrained_nn.predict(X)
# Save features for later use
joblib.dump(features_x, "features_x.dat")
# Create a model and add layers
model = Sequential()
model.add(Flatten(input_shape=features_x.shape[1:]))
model.add(Dense(100, activation='relu', use_bias=True))
model.add(Dense(Y.shape[1], activation='softmax', use_bias=False))
# Compile & train only the fully connected model
model.compile(loss="categorical_crossentropy", optimizer=keras.optimizers.Adam(learning_rate=0.001))
history = model.fit(features_x, Y_train, batch_size=16, epochs=Epochs)
Assuming you already have the features in features_x, you can create and train the model like this:
import torch

# create a loader for the data
# (features_x and Y_train must be torch tensors; if they were saved as
# NumPy arrays, convert them first, e.g. with torch.from_numpy)
dataset = torch.utils.data.TensorDataset(features_x, Y_train)
loader = torch.utils.data.DataLoader(dataset, batch_size=16, shuffle=True)
# define the classification model
in_features = features_x.flatten(1).size(1)
model = torch.nn.Sequential(
    torch.nn.Flatten(),
    torch.nn.Linear(in_features=in_features, out_features=100, bias=True),
    torch.nn.ReLU(),
    torch.nn.Linear(in_features=100, out_features=Y.shape[1], bias=False)  # no softmax here: CrossEntropyLoss below applies log-softmax internally
)
model.train()
# define the optimizer and loss function
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
loss_function = torch.nn.CrossEntropyLoss()
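# Note: CrossEntropyLoss applies log-softmax internally and expects integer
# class indices as targets. If Y_train is one-hot encoded (as in the Keras
# code above), convert it first, e.g. Y_train = Y_train.argmax(dim=1).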
# training loop
for e in range(Epochs):
    for batch_x, batch_y in loader:  # iterate over batches directly (enumerate would yield (index, batch) pairs instead)
        optimizer.zero_grad()                # clear gradients from the previous batch
        out = model(batch_x)                 # forward pass
        loss = loss_function(out, batch_y)   # compute loss
        loss.backward()                      # backpropagate, get gradients
        optimizer.step()                     # update model weights
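For completeness, the step the Keras code does with pretrained_nn.predict(X) — computing the features once up front — can be done in PyTorch roughly as follows. This is a minimal sketch, assuming torchvision is installed, images is a float tensor of shape (N, 3, H, W) normalized for ImageNet, and features_x.pt is an arbitrary file name; on older torchvision versions you may need pretrained=True instead of the weights argument:

import torch
import torchvision

# load a pre-trained backbone and keep only its convolutional part
# (.features is the feature extractor of torchvision's MobileNetV2)
backbone = torchvision.models.mobilenet_v2(weights="IMAGENET1K_V1").features
backbone.eval()  # inference mode: fixes batch-norm statistics

# extract the features only once, with gradient tracking disabled
with torch.no_grad():
    features_x = backbone(images)

# save the features for later use
torch.save(features_x, "features_x.pt")

For a dataset that does not fit in memory, run the extraction batch by batch through a DataLoader and concatenate the outputs. The other common pattern is to keep the backbone inside the model but freeze it by setting p.requires_grad = False on all of its parameters; that also trains only the classifier, but still recomputes the backbone's forward pass every epoch, so precomputing the features as above is faster when the backbone stays fixed.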