Google Colab 在尝试重新连接到笔记本时冻结了我的浏览器和电脑
Google Colab freezes my browser and pc when trying to reconnect to a notebook
我正在 google colab 中训练机器学习模型,更具体地说,我正在使用 PyTorch-lightning 训练 GAN。问题发生在我由于不活动而与当前运行时断开连接时。当我尝试重新连接我的浏览器时(在 firefox 和 chrome 上试过)首先变得迟钝然后冻结,我的电脑开始滞后,以至于我无法关闭我的浏览器并且它不会消失。我被迫按下 PC 的电源按钮以重新启动 PC。
我不知道为什么会这样。
我尝试了各种批量大小(包括批量大小为 1),但问题仍然会发生。也不可能是我的数据集太大(因为出于测试目的,我在一个只有 10 张图像的数据集上也尝试过,问题依旧)。
我希望有人能帮助我。
这是我的代码(要运行代码,您需要一个 comet.ml 账户并填入 comet.ml 的 api 密钥):
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from pytorch_lightning.callbacks import ModelCheckpoint
import pytorch_lightning as pl
from pytorch_lightning import loggers
import numpy as np
from numpy.random import choice
from PIL import Image
import os
from pathlib import Path
import shutil
from collections import OrderedDict
# Custom weights initialization called on netG and netD via module.apply(),
# following the DCGAN paper (Radford et al., 2015).
def weights_init(m):
    """Initialize a layer in place based on its class name.

    Conv* weights ~ N(0, 0.02); BatchNorm* weights ~ N(1, 0.02) with biases 0.
    Layers of any other type are left untouched.
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)
# Randomly flip some labels — label-noise regularization for the discriminator.
def noisy_labels(y, p_flip=0.05):
    """Flip roughly ``p_flip`` of the labels in ``y`` in place and return ``y``.

    Note: indices are drawn with replacement, so slightly fewer than
    ``p_flip * len(y)`` distinct entries may actually be flipped.
    """
    # determine the number of labels to flip
    n_select = int(p_flip * y.shape[0])
    # choose labels to flip (numpy choice, with replacement)
    flip_ix = choice([i for i in range(y.shape[0])], size=n_select)
    # invert the selected labels in place: x -> 1 - x
    y[flip_ix] = 1 - y[flip_ix]
    return y
class AddGaussianNoise(object):
    """Transform that adds i.i.d. Gaussian noise to a tensor ("instance noise")."""

    def __init__(self, mean=0.0, std=0.1):
        self.std = std
        self.mean = mean

    def __call__(self, tensor):
        # randn_like keeps the noise on the same device/dtype as the input;
        # the original torch.randn(tensor.size()) always allocated on the CPU,
        # which crashes with a device mismatch when the input lives on the GPU.
        return tensor + torch.randn_like(tensor) * self.std + self.mean

    def __repr__(self):
        return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
def get_valid_labels(img):
    """Soft "real" labels drawn uniformly from (0.8, 1.1], one per batch element.

    Returns a tensor of shape (B, 1, 1, 1) matching the discriminator output.
    """
    return (0.8 - 1.1) * torch.rand(img.shape[0], 1, 1, 1) + 1.1  # soft labels
def get_unvalid_labels(img):
    """Soft "fake" labels drawn uniformly from [0.0, 0.3), with ~5% randomly
    flipped to (1 - label) by noisy_labels. Shape (B, 1, 1, 1).
    """
    return noisy_labels((0.0 - 0.3) * torch.rand(img.shape[0], 1, 1, 1) + 0.3)  # soft labels
class Generator(nn.Module):
    """DCGAN generator: maps latent noise of shape (B, latent_dim, 1, 1) to
    images of shape (B, nc, 64, 64) with values in [-1, 1] (Tanh output).
    """

    def __init__(self, ngf, nc, latent_dim):
        super(Generator, self).__init__()
        self.ngf = ngf
        self.latent_dim = latent_dim
        self.nc = nc
        self.main = nn.Sequential(
            # input is Z, going into a convolution
            nn.ConvTranspose2d(latent_dim, ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ngf*8) x 4 x 4
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ngf*4) x 8 x 8
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ngf*2) x 16 x 16
            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ngf) x 32 x 32
            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
            nn.Tanh()
            # state size. (nc) x 64 x 64
        )

    def forward(self, input):
        return self.main(input)
class Discriminator(nn.Module):
    """DCGAN discriminator: maps images of shape (B, nc, 64, 64) to a
    per-sample "real" probability of shape (B, 1, 1, 1) (Sigmoid output).
    """

    def __init__(self, ndf, nc):
        super(Discriminator, self).__init__()
        self.nc = nc
        self.ndf = ndf
        self.main = nn.Sequential(
            # input is (nc) x 64 x 64
            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf) x 32 x 32
            nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*2) x 16 x 16
            nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*4) x 8 x 8
            nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*8) x 4 x 4
            nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
            nn.Sigmoid()
        )

    def forward(self, input):
        return self.main(input)
class DCGAN(pl.LightningModule):
    """DCGAN LightningModule trained with several stabilization tricks:

    - instance noise on real and fake images, decaying over epochs,
    - soft "real" labels and soft/noisy "fake" labels,
    - a small experience-replay buffer of past generator samples.

    Images and checkpoints are logged through a Comet logger.
    """

    def __init__(self, hparams, logger, checkpoint_folder, experiment_name):
        super().__init__()
        self.hparams = hparams
        self.logger = logger  # only compatible with comet_logger at the moment
        self.checkpoint_folder = checkpoint_folder
        self.experiment_name = experiment_name

        # networks
        self.generator = Generator(ngf=hparams.ngf, nc=hparams.nc, latent_dim=hparams.latent_dim)
        self.discriminator = Discriminator(ndf=hparams.ndf, nc=hparams.nc)
        self.generator.apply(weights_init)
        self.discriminator.apply(weights_init)

        # cache for generated images
        self.generated_imgs = None
        self.last_imgs = None

        # For experience replay: detached fakes kept for later discriminator steps.
        self.exp_replay_dis = torch.tensor([])

        # creating checkpoint folder
        dirpath = Path(self.checkpoint_folder)
        if not dirpath.exists():
            os.makedirs(dirpath, 0o755)

    def forward(self, z):
        """Map latent noise z of shape (B, latent_dim, 1, 1) to generated images."""
        return self.generator(z)

    def adversarial_loss(self, y_hat, y):
        # BCE between discriminator outputs (in (0, 1)) and (soft) target labels.
        return F.binary_cross_entropy(y_hat, y)

    def training_step(self, batch, batch_nb, optimizer_idx):
        # Instance noise; for more visit:
        # https://www.inference.vc/instance-noise-a-trick-for-stabilising-gan-training/
        # The std decays linearly and reaches 0 at two thirds of the configured epochs.
        std_gaussian = max(0, self.hparams.level_of_noise - ((self.hparams.level_of_noise * 1.5) * (self.current_epoch / self.hparams.epochs)))
        AddGaussianNoiseInst = AddGaussianNoise(std=std_gaussian)  # the noise decays over time
        imgs, _ = batch
        imgs = AddGaussianNoiseInst(imgs)  # Adding instance noise to real images
        self.last_imgs = imgs

        # train generator
        if optimizer_idx == 0:
            # sample noise
            # NOTE(review): z is allocated on the CPU; on GPU runs consider
            # torch.randn(..., device=imgs.device) — TODO confirm Lightning moves it.
            z = torch.randn(imgs.shape[0], self.hparams.latent_dim, 1, 1)
            # generate images
            self.generated_imgs = self(z)
            self.generated_imgs = AddGaussianNoiseInst(self.generated_imgs)  # Adding instance noise to fake images

            # Experience replay for the discriminator: stash a few randomly
            # chosen (detached) generated samples in the replay buffer.
            perm = torch.randperm(self.generated_imgs.size(0))  # shuffling
            r_idx = perm[:max(1, self.hparams.experience_save_per_batch)]  # getting the index
            self.exp_replay_dis = torch.cat((self.exp_replay_dis, self.generated_imgs[r_idx]), 0).detach()

            # ground truth result (ie: all fake);
            # adversarial loss is binary cross-entropy against "valid" labels
            g_loss = self.adversarial_loss(self.discriminator(self.generated_imgs), get_valid_labels(self.generated_imgs))
            tqdm_dict = {'g_loss': g_loss}
            log = {'g_loss': g_loss, "std_gaussian": std_gaussian}
            output = OrderedDict({
                'loss': g_loss,
                'progress_bar': tqdm_dict,
                'log': log
            })
            return output

        # train discriminator
        if optimizer_idx == 1:
            # Measure discriminator's ability to classify real from generated samples.
            # How well can it label real images as real?
            real_loss = self.adversarial_loss(self.discriminator(imgs), get_valid_labels(imgs))

            # Experience replay: once the buffer is full, train on already seen
            # images instead of the freshly generated batch, then reset it.
            if self.exp_replay_dis.size(0) >= self.hparams.experience_batch_size:
                fake_loss = self.adversarial_loss(self.discriminator(self.exp_replay_dis.detach()), get_unvalid_labels(self.exp_replay_dis))
                self.exp_replay_dis = torch.tensor([])  # Reset experience replay
                # discriminator loss is the average of these
                d_loss = (real_loss + fake_loss) / 2
                tqdm_dict = {'d_loss': d_loss}
                log = {'d_loss': d_loss, "d_exp_loss": fake_loss, "std_gaussian": std_gaussian}
                output = OrderedDict({
                    'loss': d_loss,
                    'progress_bar': tqdm_dict,
                    'log': log
                })
                return output
            else:
                # how well can it label freshly generated images as fake?
                fake_loss = self.adversarial_loss(self.discriminator(self.generated_imgs.detach()), get_unvalid_labels(self.generated_imgs))
                # discriminator loss is the average of these
                d_loss = (real_loss + fake_loss) / 2
                tqdm_dict = {'d_loss': d_loss}
                log = {'d_loss': d_loss, "std_gaussian": std_gaussian}
                output = OrderedDict({
                    'loss': d_loss,
                    'progress_bar': tqdm_dict,
                    'log': log
                })
                return output

    def configure_optimizers(self):
        """Two Adam optimizers: index 0 for the generator, index 1 for the discriminator."""
        lr = self.hparams.lr
        b1 = self.hparams.b1
        b2 = self.hparams.b2
        opt_g = torch.optim.Adam(self.generator.parameters(), lr=lr, betas=(b1, b2))
        opt_d = torch.optim.Adam(self.discriminator.parameters(), lr=lr, betas=(b1, b2))
        return [opt_g, opt_d], []

    def train_dataloader(self):
        """MNIST resized to (image_size, image_size) and normalized to [-1, 1]."""
        transform = transforms.Compose([transforms.Resize((self.hparams.image_size, self.hparams.image_size)),
                                        transforms.ToTensor(),
                                        transforms.Normalize([0.5], [0.5])])
        dataset = MNIST(os.getcwd(), train=True, download=True, transform=transform)
        return DataLoader(dataset, batch_size=self.hparams.batch_size)
        # Alternative: the original (Ghibli) image-folder dataset.
        # transform = transforms.Compose([transforms.Resize((self.hparams.image_size, self.hparams.image_size)),
        #                                 transforms.ToTensor(),
        #                                 transforms.Normalize([0.5], [0.5])
        #                                 ])
        # train_dataset = torchvision.datasets.ImageFolder(
        #     root="./drive/My Drive/datasets/ghibli_dataset_small_overfit/",
        #     transform=transform
        # )
        # return DataLoader(train_dataset, num_workers=self.hparams.num_workers, shuffle=True, batch_size=self.hparams.batch_size)

    def on_epoch_end(self):
        """Log a 2x2 grid of samples and periodically checkpoint + upload the model."""
        z = torch.randn(4, self.hparams.latent_dim, 1, 1)
        # match gpu device (or keep as cpu)
        if self.on_gpu:
            z = z.cuda(self.last_imgs.device.index)
        # log sampled images
        sample_imgs = self.generator(z)
        sample_imgs = sample_imgs.view(-1, self.hparams.nc, self.hparams.image_size, self.hparams.image_size)
        grid = torchvision.utils.make_grid(sample_imgs, nrow=2)
        self.logger.experiment.log_image(grid.permute(1, 2, 0), f'generated_images_epoch{self.current_epoch}', step=self.current_epoch)

        # save model
        # NOTE(review): `trainer` and `comet_logger` here are module-level globals
        # defined by the driving script, not attributes of self — TODO confirm and
        # prefer self.trainer / self.logger.
        if self.current_epoch % self.hparams.save_model_every_epoch == 0:
            trainer.save_checkpoint(self.checkpoint_folder + "/" + self.experiment_name + "_epoch_" + str(self.current_epoch) + ".ckpt")
            comet_logger.experiment.log_asset_folder(self.checkpoint_folder, step=self.current_epoch)
            # Deleting the folder where we saved the model so that we dont upload a thing twice
            dirpath = Path(self.checkpoint_folder)
            if dirpath.exists() and dirpath.is_dir():
                shutil.rmtree(dirpath)
            # creating checkpoint folder
            access_rights = 0o755
            os.makedirs(dirpath, access_rights)
# --- Hyperparameters, exposed to the model as an argparse-style Namespace ---
from argparse import Namespace
args = {
'batch_size': 48,
'lr': 0.0002,
'b1': 0.5,
'b2': 0.999,
'latent_dim': 128, # tested value which worked(in V4_1): 100
'nc': 1,
'ndf': 32,
'ngf': 32,
'epochs': 10,
'save_model_every_epoch': 5,
'image_size': 64,
'num_workers': 2,
'level_of_noise': 0.15,
'experience_save_per_batch': 1, # this value should be very low; tested value which works: 1
'experience_batch_size': 50 # this value shouldnt be too high; tested value which works: 50
}
hparams = Namespace(**args)
# Parameters
experiment_name = "DCGAN_V4_2_MNIST"
dataset_name = "MNIST"
checkpoint_folder = "DCGAN/"
tags = ["DCGAN", "MNIST", "OVERFIT", "64x64"]
dirpath = Path(checkpoint_folder)
# init logger
# NOTE(review): api_key / rest_api_key must be filled in with your comet.ml keys.
comet_logger = loggers.CometLogger(
api_key="",
rest_api_key="",
project_name="gan",
experiment_name=experiment_name,
#experiment_key="f23d00c0fe3448ee884bfbe3fc3923fd" # used for resuming trained id can be found in comet.ml
)
#defining net
net = DCGAN(hparams, comet_logger, checkpoint_folder, experiment_name)
#logging
comet_logger.experiment.set_model_graph(str(net))
comet_logger.experiment.add_tags(tags=tags)
comet_logger.experiment.log_dataset_info(dataset_name)
# NOTE(review): `trainer` and `comet_logger` are also read as globals from
# DCGAN.on_epoch_end — renaming either variable would break checkpoint logging.
trainer = pl.Trainer(#resume_from_checkpoint="GHIBLI_DCGAN_OVERFIT_64px_epoch_6000.ckpt",
logger=comet_logger,
max_epochs=args["epochs"]
)
trainer.fit(net)
comet_logger.experiment.end()
我通过导入以下内容(并用它清除笔记本中不断累积的单元格输出,避免重连时浏览器渲染过多输出)修复了这个问题:
from IPython.display import clear_output
我正在 google colab 中训练机器学习模型,更具体地说,我正在使用 PyTorch-lightning 训练 GAN。问题发生在我由于不活动而与当前运行时断开连接时。当我尝试重新连接我的浏览器时(在 firefox 和 chrome 上试过)首先变得迟钝然后冻结,我的电脑开始滞后,以至于我无法关闭我的浏览器并且它不会消失。我被迫按下 PC 的电源按钮以重新启动 PC。 我不知道为什么会这样。 我尝试了各种批量大小(也是大小 1),但它仍然会发生。也不可能是我的数据集太大了(因为我在一个有 10 个图像的数据集上尝试过它来测试目的)。 我希望有人能帮助我。
这是我的代码(要运行代码,您需要一个 comet.ml 账户并填入 comet.ml 的 api 密钥):
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from pytorch_lightning.callbacks import ModelCheckpoint
import pytorch_lightning as pl
from pytorch_lightning import loggers
import numpy as np
from numpy.random import choice
from PIL import Image
import os
from pathlib import Path
import shutil
from collections import OrderedDict
# Custom weights initialization called on netG and netD via module.apply(),
# following the DCGAN paper (Radford et al., 2015).
def weights_init(m):
    """Initialize a layer in place based on its class name.

    Conv* weights ~ N(0, 0.02); BatchNorm* weights ~ N(1, 0.02) with biases 0.
    Layers of any other type are left untouched.
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif classname.find('BatchNorm') != -1:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)
# Randomly flip some labels — label-noise regularization for the discriminator.
def noisy_labels(y, p_flip=0.05):
    """Flip roughly ``p_flip`` of the labels in ``y`` in place and return ``y``.

    Note: indices are drawn with replacement, so slightly fewer than
    ``p_flip * len(y)`` distinct entries may actually be flipped.
    """
    # determine the number of labels to flip
    n_select = int(p_flip * y.shape[0])
    # choose labels to flip (numpy choice, with replacement)
    flip_ix = choice([i for i in range(y.shape[0])], size=n_select)
    # invert the selected labels in place: x -> 1 - x
    y[flip_ix] = 1 - y[flip_ix]
    return y
class AddGaussianNoise(object):
    """Transform that adds i.i.d. Gaussian noise to a tensor ("instance noise")."""

    def __init__(self, mean=0.0, std=0.1):
        self.std = std
        self.mean = mean

    def __call__(self, tensor):
        # randn_like keeps the noise on the same device/dtype as the input;
        # the original torch.randn(tensor.size()) always allocated on the CPU,
        # which crashes with a device mismatch when the input lives on the GPU.
        return tensor + torch.randn_like(tensor) * self.std + self.mean

    def __repr__(self):
        return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
def get_valid_labels(img):
    """Soft "real" labels drawn uniformly from (0.8, 1.1], one per batch element.

    Returns a tensor of shape (B, 1, 1, 1) matching the discriminator output.
    """
    return (0.8 - 1.1) * torch.rand(img.shape[0], 1, 1, 1) + 1.1  # soft labels
def get_unvalid_labels(img):
    """Soft "fake" labels drawn uniformly from [0.0, 0.3), with ~5% randomly
    flipped to (1 - label) by noisy_labels. Shape (B, 1, 1, 1).
    """
    return noisy_labels((0.0 - 0.3) * torch.rand(img.shape[0], 1, 1, 1) + 0.3)  # soft labels
class Generator(nn.Module):
    """DCGAN generator: maps latent noise of shape (B, latent_dim, 1, 1) to
    images of shape (B, nc, 64, 64) with values in [-1, 1] (Tanh output).
    """

    def __init__(self, ngf, nc, latent_dim):
        super(Generator, self).__init__()
        self.ngf = ngf
        self.latent_dim = latent_dim
        self.nc = nc
        self.main = nn.Sequential(
            # input is Z, going into a convolution
            nn.ConvTranspose2d(latent_dim, ngf * 8, 4, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ngf*8) x 4 x 4
            nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ngf*4) x 8 x 8
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ngf*2) x 16 x 16
            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ngf),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ngf) x 32 x 32
            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
            nn.Tanh()
            # state size. (nc) x 64 x 64
        )

    def forward(self, input):
        return self.main(input)
class Discriminator(nn.Module):
    """DCGAN discriminator: maps images of shape (B, nc, 64, 64) to a
    per-sample "real" probability of shape (B, 1, 1, 1) (Sigmoid output).
    """

    def __init__(self, ndf, nc):
        super(Discriminator, self).__init__()
        self.nc = nc
        self.ndf = ndf
        self.main = nn.Sequential(
            # input is (nc) x 64 x 64
            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf) x 32 x 32
            nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 2),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*2) x 16 x 16
            nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*4) x 8 x 8
            nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
            nn.BatchNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, inplace=True),
            # state size. (ndf*8) x 4 x 4
            nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
            nn.Sigmoid()
        )

    def forward(self, input):
        return self.main(input)
class DCGAN(pl.LightningModule):
    """DCGAN LightningModule trained with several stabilization tricks:

    - instance noise on real and fake images, decaying over epochs,
    - soft "real" labels and soft/noisy "fake" labels,
    - a small experience-replay buffer of past generator samples.

    Images and checkpoints are logged through a Comet logger.
    """

    def __init__(self, hparams, logger, checkpoint_folder, experiment_name):
        super().__init__()
        self.hparams = hparams
        self.logger = logger  # only compatible with comet_logger at the moment
        self.checkpoint_folder = checkpoint_folder
        self.experiment_name = experiment_name

        # networks
        self.generator = Generator(ngf=hparams.ngf, nc=hparams.nc, latent_dim=hparams.latent_dim)
        self.discriminator = Discriminator(ndf=hparams.ndf, nc=hparams.nc)
        self.generator.apply(weights_init)
        self.discriminator.apply(weights_init)

        # cache for generated images
        self.generated_imgs = None
        self.last_imgs = None

        # For experience replay: detached fakes kept for later discriminator steps.
        self.exp_replay_dis = torch.tensor([])

        # creating checkpoint folder
        dirpath = Path(self.checkpoint_folder)
        if not dirpath.exists():
            os.makedirs(dirpath, 0o755)

    def forward(self, z):
        """Map latent noise z of shape (B, latent_dim, 1, 1) to generated images."""
        return self.generator(z)

    def adversarial_loss(self, y_hat, y):
        # BCE between discriminator outputs (in (0, 1)) and (soft) target labels.
        return F.binary_cross_entropy(y_hat, y)

    def training_step(self, batch, batch_nb, optimizer_idx):
        # Instance noise; for more visit:
        # https://www.inference.vc/instance-noise-a-trick-for-stabilising-gan-training/
        # The std decays linearly and reaches 0 at two thirds of the configured epochs.
        std_gaussian = max(0, self.hparams.level_of_noise - ((self.hparams.level_of_noise * 1.5) * (self.current_epoch / self.hparams.epochs)))
        AddGaussianNoiseInst = AddGaussianNoise(std=std_gaussian)  # the noise decays over time
        imgs, _ = batch
        imgs = AddGaussianNoiseInst(imgs)  # Adding instance noise to real images
        self.last_imgs = imgs

        # train generator
        if optimizer_idx == 0:
            # sample noise
            # NOTE(review): z is allocated on the CPU; on GPU runs consider
            # torch.randn(..., device=imgs.device) — TODO confirm Lightning moves it.
            z = torch.randn(imgs.shape[0], self.hparams.latent_dim, 1, 1)
            # generate images
            self.generated_imgs = self(z)
            self.generated_imgs = AddGaussianNoiseInst(self.generated_imgs)  # Adding instance noise to fake images

            # Experience replay for the discriminator: stash a few randomly
            # chosen (detached) generated samples in the replay buffer.
            perm = torch.randperm(self.generated_imgs.size(0))  # shuffling
            r_idx = perm[:max(1, self.hparams.experience_save_per_batch)]  # getting the index
            self.exp_replay_dis = torch.cat((self.exp_replay_dis, self.generated_imgs[r_idx]), 0).detach()

            # ground truth result (ie: all fake);
            # adversarial loss is binary cross-entropy against "valid" labels
            g_loss = self.adversarial_loss(self.discriminator(self.generated_imgs), get_valid_labels(self.generated_imgs))
            tqdm_dict = {'g_loss': g_loss}
            log = {'g_loss': g_loss, "std_gaussian": std_gaussian}
            output = OrderedDict({
                'loss': g_loss,
                'progress_bar': tqdm_dict,
                'log': log
            })
            return output

        # train discriminator
        if optimizer_idx == 1:
            # Measure discriminator's ability to classify real from generated samples.
            # How well can it label real images as real?
            real_loss = self.adversarial_loss(self.discriminator(imgs), get_valid_labels(imgs))

            # Experience replay: once the buffer is full, train on already seen
            # images instead of the freshly generated batch, then reset it.
            if self.exp_replay_dis.size(0) >= self.hparams.experience_batch_size:
                fake_loss = self.adversarial_loss(self.discriminator(self.exp_replay_dis.detach()), get_unvalid_labels(self.exp_replay_dis))
                self.exp_replay_dis = torch.tensor([])  # Reset experience replay
                # discriminator loss is the average of these
                d_loss = (real_loss + fake_loss) / 2
                tqdm_dict = {'d_loss': d_loss}
                log = {'d_loss': d_loss, "d_exp_loss": fake_loss, "std_gaussian": std_gaussian}
                output = OrderedDict({
                    'loss': d_loss,
                    'progress_bar': tqdm_dict,
                    'log': log
                })
                return output
            else:
                # how well can it label freshly generated images as fake?
                fake_loss = self.adversarial_loss(self.discriminator(self.generated_imgs.detach()), get_unvalid_labels(self.generated_imgs))
                # discriminator loss is the average of these
                d_loss = (real_loss + fake_loss) / 2
                tqdm_dict = {'d_loss': d_loss}
                log = {'d_loss': d_loss, "std_gaussian": std_gaussian}
                output = OrderedDict({
                    'loss': d_loss,
                    'progress_bar': tqdm_dict,
                    'log': log
                })
                return output

    def configure_optimizers(self):
        """Two Adam optimizers: index 0 for the generator, index 1 for the discriminator."""
        lr = self.hparams.lr
        b1 = self.hparams.b1
        b2 = self.hparams.b2
        opt_g = torch.optim.Adam(self.generator.parameters(), lr=lr, betas=(b1, b2))
        opt_d = torch.optim.Adam(self.discriminator.parameters(), lr=lr, betas=(b1, b2))
        return [opt_g, opt_d], []

    def train_dataloader(self):
        """MNIST resized to (image_size, image_size) and normalized to [-1, 1]."""
        transform = transforms.Compose([transforms.Resize((self.hparams.image_size, self.hparams.image_size)),
                                        transforms.ToTensor(),
                                        transforms.Normalize([0.5], [0.5])])
        dataset = MNIST(os.getcwd(), train=True, download=True, transform=transform)
        return DataLoader(dataset, batch_size=self.hparams.batch_size)
        # Alternative: the original (Ghibli) image-folder dataset.
        # transform = transforms.Compose([transforms.Resize((self.hparams.image_size, self.hparams.image_size)),
        #                                 transforms.ToTensor(),
        #                                 transforms.Normalize([0.5], [0.5])
        #                                 ])
        # train_dataset = torchvision.datasets.ImageFolder(
        #     root="./drive/My Drive/datasets/ghibli_dataset_small_overfit/",
        #     transform=transform
        # )
        # return DataLoader(train_dataset, num_workers=self.hparams.num_workers, shuffle=True, batch_size=self.hparams.batch_size)

    def on_epoch_end(self):
        """Log a 2x2 grid of samples and periodically checkpoint + upload the model."""
        z = torch.randn(4, self.hparams.latent_dim, 1, 1)
        # match gpu device (or keep as cpu)
        if self.on_gpu:
            z = z.cuda(self.last_imgs.device.index)
        # log sampled images
        sample_imgs = self.generator(z)
        sample_imgs = sample_imgs.view(-1, self.hparams.nc, self.hparams.image_size, self.hparams.image_size)
        grid = torchvision.utils.make_grid(sample_imgs, nrow=2)
        self.logger.experiment.log_image(grid.permute(1, 2, 0), f'generated_images_epoch{self.current_epoch}', step=self.current_epoch)

        # save model
        # NOTE(review): `trainer` and `comet_logger` here are module-level globals
        # defined by the driving script, not attributes of self — TODO confirm and
        # prefer self.trainer / self.logger.
        if self.current_epoch % self.hparams.save_model_every_epoch == 0:
            trainer.save_checkpoint(self.checkpoint_folder + "/" + self.experiment_name + "_epoch_" + str(self.current_epoch) + ".ckpt")
            comet_logger.experiment.log_asset_folder(self.checkpoint_folder, step=self.current_epoch)
            # Deleting the folder where we saved the model so that we dont upload a thing twice
            dirpath = Path(self.checkpoint_folder)
            if dirpath.exists() and dirpath.is_dir():
                shutil.rmtree(dirpath)
            # creating checkpoint folder
            access_rights = 0o755
            os.makedirs(dirpath, access_rights)
# --- Hyperparameters, exposed to the model as an argparse-style Namespace ---
from argparse import Namespace
args = {
'batch_size': 48,
'lr': 0.0002,
'b1': 0.5,
'b2': 0.999,
'latent_dim': 128, # tested value which worked(in V4_1): 100
'nc': 1,
'ndf': 32,
'ngf': 32,
'epochs': 10,
'save_model_every_epoch': 5,
'image_size': 64,
'num_workers': 2,
'level_of_noise': 0.15,
'experience_save_per_batch': 1, # this value should be very low; tested value which works: 1
'experience_batch_size': 50 # this value shouldnt be too high; tested value which works: 50
}
hparams = Namespace(**args)
# Parameters
experiment_name = "DCGAN_V4_2_MNIST"
dataset_name = "MNIST"
checkpoint_folder = "DCGAN/"
tags = ["DCGAN", "MNIST", "OVERFIT", "64x64"]
dirpath = Path(checkpoint_folder)
# init logger
# NOTE(review): api_key / rest_api_key must be filled in with your comet.ml keys.
comet_logger = loggers.CometLogger(
api_key="",
rest_api_key="",
project_name="gan",
experiment_name=experiment_name,
#experiment_key="f23d00c0fe3448ee884bfbe3fc3923fd" # used for resuming trained id can be found in comet.ml
)
#defining net
net = DCGAN(hparams, comet_logger, checkpoint_folder, experiment_name)
#logging
comet_logger.experiment.set_model_graph(str(net))
comet_logger.experiment.add_tags(tags=tags)
comet_logger.experiment.log_dataset_info(dataset_name)
# NOTE(review): `trainer` and `comet_logger` are also read as globals from
# DCGAN.on_epoch_end — renaming either variable would break checkpoint logging.
trainer = pl.Trainer(#resume_from_checkpoint="GHIBLI_DCGAN_OVERFIT_64px_epoch_6000.ckpt",
logger=comet_logger,
max_epochs=args["epochs"]
)
trainer.fit(net)
comet_logger.experiment.end()
我通过导入以下内容(并用它清除笔记本中不断累积的单元格输出,避免重连时浏览器渲染过多输出)修复了这个问题:
from IPython.display import clear_output