# --- Training setup: build the dataset, model, and visualizer from opt ---
dataset = create_dataset(opt)      # create a dataset given opt.dataset_mode and other options
dataset_size = len(dataset)        # get the number of images in the dataset.
model = create_model(opt)          # create a model given opt.model and other options
print('The number of training images = %d' % dataset_size)
visualizer = Visualizer(opt)       # create a visualizer that display/save images and plots
opt.visualizer = visualizer        # expose the visualizer to the model/options consumers downstream
total_iters = 0                    # the total number of training iterations (cumulative over all epochs)
optimize_time = 0.1                # seed value for the smoothed per-image optimization-time estimate
times = []                         # NOTE(review): appears unused in the visible portion — presumably filled later; confirm
# --- Main training loop (fragment: the inner-loop body continues past this chunk) ---
for epoch in range(opt.epoch_count, opt.n_epochs + opt.n_epochs_decay + 1):
    # outer loop for different epochs; we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>
    epoch_start_time = time.time()  # timer for entire epoch
    iter_data_time = time.time()    # timer for data loading per iteration
    epoch_iter = 0                  # the number of training iterations in current epoch, reset to 0 every epoch
    visualizer.reset()              # reset the visualizer: make sure it saves the results to HTML at least once every epoch
    dataset.set_epoch(epoch)        # let the dataset reshuffle/re-seed for this epoch
    for i, data in enumerate(dataset):  # inner loop within one epoch
        iter_start_time = time.time()   # timer for computation per iteration
        if total_iters % opt.print_freq == 0:
            # data-loading time is only sampled on iterations where we will print
            t_data = iter_start_time - iter_data_time

        batch_size = data["A0"].size(0)  # assumes data is a dict of tensors keyed by "A0" etc. — TODO confirm against dataset
        total_iters += batch_size
        epoch_iter += batch_size
        if len(opt.gpu_ids) > 0:
            # make the timing below accurate by flushing pending CUDA work first
            torch.cuda.synchronize()
        optimize_start_time = time.time()
        if epoch == opt.epoch_count and i == 0:
            # first batch of the first epoch: finish model construction
            # model.data_dependent_initialize(data)
            model.setup(opt)        # regular setup: load and print networks; create schedulers
            model.parallelize()
        model.set_input(data)       # unpack data from dataset and apply preprocessing
        # ... inner-loop body continues in the rest of the file (optimization, logging, checkpointing)