Commit 51b2b55a authored by Srijeet Roy

merge class conditional and inpainting evaluation pipelines

parent 8d4c7bce
@@ -12,8 +12,9 @@ from evaluation.helpers.kNN import *
from evaluation.helpers.metrics import *

-def cdm_evaluator(experiment_path, realpath, genpath, size=128, arch='clip', mode='both', k=3, sample=10, name_appendix='', fid='no'):
+def cdm_evaluator_afhq_class(experiment_path, realpath, genpath, size=128, arch='clip', mode='both', k=3, sample=10, name_appendix='', fid='no'):
    #device = "mps" if torch.backends.mps.is_available() else "cpu"
    device = "cuda" if torch.cuda.is_available() else "cpu"
    print('device:', device)

@@ -174,3 +175,158 @@ def cdm_evaluator(experiment_path, realpath, genpath, size=128, arch='clip', mod
                size=size,
                name_appendix=name_appendix)
    print('Finish!')
def cdm_evaluator_lhq_paint(experiment_path, realpath, genpath, size=128, arch='clip', mode='kNN', k=3, sample=10, name_appendix='', fid='no'):
    #device = "mps" if torch.backends.mps.is_available() else "cpu"
    device = "cuda" if torch.cuda.is_available() else "cpu"
    print('device:', device)

    path_to_real_images = realpath        # path to real images (assumes that there are 2 subdirectories - train and test)
    path_to_generated_images = genpath    # path to generated samples
    size = size                           # image resolution
    arch = arch                           # architecture to extract features - choose between 'cnn' and 'clip'
    mode = mode                           # qualitative eval mode - 'kNN'
    k_kNN = k                             # value of k if mode=='kNN'
    sample = sample                       # for kNN, find kNNs of first 'sample' samples in the dir; for pairs, find top 'sample' closest ones
    name_appendix = name_appendix         # name appendix for evaluation files
    fid_bool = fid                        # whether to compute PSNR and SSIM

    print('Start')
    # change working directory to output folder (experiment_path/eval_output)
    output_path = Path(os.path.join(experiment_path, 'eval_output'))
    if not output_path.is_dir():
        os.mkdir(output_path)
    # output path
    os.chdir(output_path)

    # create output text file, store evaluation metadata
    txt_filename = 'evaluation_' + '_' + arch + '_' + mode + '-' + name_appendix + '.txt'
    with open(txt_filename, 'w') as f:
        f.write(f'Path to real images: {path_to_real_images}\n')
        f.write(f'Path to generated images: {path_to_generated_images}\n')
        f.write(f'Experiment on LHQ dataset with images of resolution {size}x{size}\n')
        f.write(f'Using {arch} model to extract features\n')
        f.write(f'Plot of {mode} on {sample} samples\n')
        f.write(f'Quantitative metrics computed: {fid_bool}\n')
    # datapaths
    path_to_training_images = path_to_real_images

    # compute quantitative metrics (PSNR and SSIM)
    if fid_bool == 'yes':
        # Content-invariant metrics
        train_images = image_to_tensor(path_to_training_images, device=device)
        generated = image_to_tensor(path_to_generated_images, device=device)

        print('Computing PSNR and SSIM scores...')
        psnr_score, ssim_score = compute_ssim_psnr_scores(train_images, generated, device)
        with open(txt_filename, 'a') as f:
            f.write(f'PSNR score: {psnr_score}\n')
            f.write(f'SSIM score: {ssim_score}\n')

    print(f'Loading model {arch}...')
    feature_flag = False
    # load pre-trained feature extractors (used for the qualitative evaluations)
    pth = '/home/wn455752/repo/evaluation/features/lhq'

    # load pretrained ResNet50
    if arch == 'cnn':
        print('Loading pretrained ResNet50...')
        path_to_pretrained_weights = '/home/wn455752/repo/evaluation/pretrained/resnet50_places365_pretrained/resnet50_places365_weights.pth'
        weights = torch.load(path_to_pretrained_weights)
        model = resnet50().to(device)
        model.load_state_dict(weights)
        transform = transforms.Compose([transforms.ToTensor(),                  # transform PIL.Image to torch.Tensor
                                        transforms.Lambda(lambda x: x * 255)])  # scale values to VGG input range
        with torch.no_grad():
            model.eval()

        print('Checking for existing training dataset features...')
        # check for saved dataset features
        resnet_pth = Path(os.path.join(str(pth), 'resnet_features'))
        if resnet_pth.is_dir():
            name_pth = Path(os.path.join(str(pth), 'resnet_features/real_name_list'))
            if name_pth.is_file():
                with open(name_pth, 'rb') as fp:
                    real_names = pickle.load(fp)
            feature_pth = Path(os.path.join(pth, 'resnet_features/real_image_features.pt'))
            if feature_pth.is_file():
                print('Loading existing training dataset features...')
                real_features = torch.load(feature_pth, map_location="cpu")
                real_features = real_features.to(device)
                feature_flag = True
        else:
            os.mkdir(pth)
            os.mkdir(os.path.join(str(pth), 'resnet_features'))
            name_pth = Path(os.path.join(str(pth), 'resnet_features/real_name_list'))
            feature_pth = Path(os.path.join(str(pth), 'resnet_features/real_image_features.pt'))
    # load CLIP
    elif arch == 'clip':
        print('Loading pretrained CLIP...')
        model, transform = clip.load("ViT-B/32", device=device)

        # check for saved dataset features
        print('Checking for existing training dataset features...')
        clip_pth = Path(os.path.join(str(pth), 'clip_features'))
        if clip_pth.is_dir():
            name_pth = Path(os.path.join(pth, 'clip_features/real_name_list'))
            if name_pth.is_file():
                with open(name_pth, 'rb') as fp:
                    real_names = pickle.load(fp)
            feature_pth = Path(os.path.join(pth, 'clip_features/real_image_features.pt'))
            if feature_pth.is_file():
                print('Loading existing training dataset features...')
                real_features = torch.load(feature_pth, map_location="cpu")
                real_features = real_features.to(device)
                feature_flag = True
        else:
            os.mkdir(pth)
            os.mkdir(os.path.join(str(pth), 'clip_features'))
            name_pth = Path(os.path.join(str(pth), 'clip_features/real_name_list'))
            feature_pth = Path(os.path.join(str(pth), 'clip_features/real_image_features.pt'))
    # Qualitative Evaluations
    knn = kNN()

    # collect images from directories and store in a tensor
    if not feature_flag:
        print('Collecting training images...')
        real_names, real_tensor = knn.get_images(path_to_training_images, transform)
        with open(name_pth, 'wb') as fp:
            pickle.dump(real_names, fp)

    print('Collecting generated images...')
    generated_names, generated_tensor = knn.get_images(path_to_generated_images, transform)

    # extract features from image tensors
    if not feature_flag:
        print('Extracting features from training images...')
        real_features = knn.feature_extractor(real_tensor, model, device)
        torch.save(real_features, feature_pth)

    print('Extracting features from generated images...')
    generated_features = knn.feature_extractor(generated_tensor, model, device)

    if sample == 'all':
        sample_size = len(generated_names)
    else:
        sample_size = int(sample)

    print('Finding kNNs...')
    knn.kNN(output_path,
            real_names, generated_names,
            real_features, generated_features,
            path_to_training_images, path_to_generated_images,
            k=k_kNN,
            sample=sample_size,
            size=size,
            name_appendix=name_appendix)
    print('Finish!')
\ No newline at end of file
@@ -14,6 +14,7 @@ These evaluations include -
2. Inception score
3. Clean FID score (with CLIP)
4. FID infinity and IS infinity scores

These metrics are computed for the class conditional diffusion model, since its generated samples have no ground truth to compare against.
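A minimal sketch of how such distribution-level scores can be computed with the clean-fid package linked below (directory paths are placeholders; the pipeline's own wrappers presumably live in evaluation/helpers/metrics.py):

``` python
# Illustrative sketch only (not the repository's exact code): clean FID and
# CLIP-based clean FID between a folder of real and a folder of generated images.
from cleanfid import fid

real_dir = "data/afhq/train/cat"        # placeholder path to real images
gen_dir = "eval_output/generated_cats"  # placeholder path to generated samples

score = fid.compute_fid(real_dir, gen_dir, mode="clean")
clip_score = fid.compute_fid(real_dir, gen_dir, mode="clean", model_name="clip_vit_b_32")
print(f"clean FID: {score:.2f} | clean FID (CLIP features): {clip_score:.2f}")
```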
Content invariant metrics are useful when the model output can be compared w.r.t. a ground truth. \
For example, our model can output the reconstructed version of an input training image (following the entire forward \
@@ -21,6 +22,7 @@ and reverse trajectories). \
These evaluations include -
1. SSIM (Structural Similarity Index Metric)
2. PSNR

These metrics are computed for the inpainting model to compare the inpainted output with the ground truth sample.
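As a rough illustration (a stand-in for the repository's own compute_ssim_psnr_scores helper, not its actual code), PSNR and SSIM between aligned ground-truth and inpainted batches can be computed with torchmetrics:

``` python
# Illustrative sketch: PSNR/SSIM between matched ground-truth and inpainted batches.
# The tensors are random placeholders standing in for (N, C, H, W) images in [0, 1].
import torch
from torchmetrics import PeakSignalNoiseRatio, StructuralSimilarityIndexMeasure

ground_truth = torch.rand(8, 3, 128, 128)
inpainted = torch.rand(8, 3, 128, 128)

psnr = PeakSignalNoiseRatio(data_range=1.0)
ssim = StructuralSimilarityIndexMeasure(data_range=1.0)
print("PSNR:", psnr(inpainted, ground_truth).item())
print("SSIM:", ssim(inpainted, ground_truth).item())
```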
### Qualitative evaluations -

@@ -35,15 +37,19 @@ extracted by using a pretrained model (ResNet50-Places365/VGGFace or CLIP). Base
1. kNN - plot the k nearest neighbors of the generated samples
2. Closest pairs - plot the top pairs with smallest MSE value

For the inpainting model, only closest pairs are computed, in order to find the inpainted outputs that look most similar to the \
raw image before the mask was applied.
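The closest-pairs search itself boils down to a nearest-neighbour query in feature space. A minimal sketch of the idea, with random placeholder features instead of the CLIP/ResNet embeddings the pipeline actually extracts:

``` python
# Illustrative sketch: for each generated image, find the real image whose feature
# vector is closest (per-dimension MSE), then report the overall top-k closest pairs.
import torch

real_feats = torch.randn(1000, 512)   # placeholder: features of real images
gen_feats = torch.randn(50, 512)      # placeholder: features of generated images

# mean squared error between every generated/real feature pair
mse = (torch.cdist(gen_feats, real_feats, p=2) ** 2) / real_feats.shape[1]
best_mse, best_real = mse.min(dim=1)              # closest real image per generated image
top = torch.topk(best_mse, k=5, largest=False)    # 5 closest pairs overall
for gen_idx, pair_mse in zip(top.indices.tolist(), top.values.tolist()):
    print(f"generated #{gen_idx} <-> real #{best_real[gen_idx].item()} (MSE {pair_mse:.4f})")
```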
### Arguments -

-Execution starts with evaluate_full.py file. Input arguments are -
+Execution starts with evaluate.py file.

For the Class Conditional Diffusion Model, the function ```cdm_evaluator_afhq_class``` is called with the following arguments:

* <pre>-rp, --realpath : Path to real images (string) </pre>
* <pre>-gp, --genpath : Path to generated images (string) </pre>
* <pre>--size : Resolution of images the model was trained on, default 128 (int) </pre>
-* <pre>-a, --arch : Choose between 'cnn' and 'clip'. Chosen pretrained model is used to extract features from the images.
+* <pre>-a, --arch : Pretrained model is used to extract features from the images.
              Default = 'clip' (string)
              **!!! Currently no CNN models are supported**</pre>
* <pre>-m, --mode : Choose between 'kNN' and 'pairs' (for closest pairs) or both, default = 'both' (string) </pre>
@@ -54,11 +60,28 @@ Execution starts with evaluate_full.py file. Input arguments are -
* <pre>-n, --name : Name appendix (string) </pre>
* <pre>--fid : Choose between 'yes' and 'no'. Compute FID, Inception score and their variants. Default 'no' (string) </pre>
For the Inpainting Model, the function ```cdm_evaluator_lhq_paint``` is called with the following arguments (an example call is sketched after the list):
* <pre>-rp, --realpath : Path to real images (string) </pre>
* <pre>-gp, --genpath : Path to generated images (string) </pre>
* <pre>--size : Resolution of images the model was trained on, default 128 (int) </pre>
* <pre>-a, --arch : Choose between 'clip' and 'cnn'. Pretrained model is used to extract features from the images.
Default = 'clip' (string) </pre>
* <pre>-m, --mode : Default = 'pairs' (string) </pre>
* <pre>-k, --k : k value for kNN, default = 3 (int) </pre>
* <pre>-s, --sample : Choose between an int and 'all'. If mode is 'kNN', plot kNN for this many samples (first s samples
in the directory of generated images). If mode is 'pairs', plot the top s closest pairs from entire
directory of generated images. Default 10 (int or 'all') </pre>
* <pre>-n, --name : Name appendix (string) </pre>
* <pre>--fid : Choose between 'yes' and 'no'. Compute the content-invariant metrics (PSNR and SSIM). Default 'no' (string) </pre>
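For orientation, a call could look roughly like the following. The import path and all directory paths are placeholders; only the keyword names come from the function signature:

``` python
# Hypothetical example call; adjust paths to your own experiment layout.
from evaluate import cdm_evaluator_lhq_paint  # module name assumed from "evaluate.py" above

cdm_evaluator_lhq_paint(
    experiment_path='experiments/inpaint_lhq/',   # eval_output/ is created in here
    realpath='samples/epoch_190/raw/',            # raw images before masking (placeholder)
    genpath='samples/epoch_190/inpaint/',         # inpainted outputs (placeholder)
    size=128,
    arch='clip',
    mode='pairs',
    k=3,
    sample=20,
    name_appendix='inpaint_eval_ep_190',
    fid='yes',                                    # 'yes' also triggers the PSNR/SSIM computation
)
```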
Path to real images leads to a directory with two sub-directories - train and test.

<pre>
data
|_ lhq
| |_ train
| |_ test
|_ afhq
| |_ train
| |_ cat
@@ -70,10 +93,11 @@ data
| |_ wild
</pre>
-CLIP features of training images are saved after the first execution. This alleviates the need to recompute \
+CLIP (or CNN) features of training images are saved after the first execution. This alleviates the need to recompute \
features of real images for different sets of generated samples.
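The caching itself follows a simple extract-once, torch.save/torch.load pattern. Sketched with placeholder names (the pipeline does this with its kNN helper and per-architecture feature folders):

``` python
# Illustrative sketch of the feature cache: load saved real-image features if present,
# otherwise extract them once and save them for the next run.
import os
import torch

def load_or_extract_features(cache_file, extract_fn, device='cpu'):
    """Load cached features if present, otherwise extract once and cache them."""
    if os.path.isfile(cache_file):
        return torch.load(cache_file, map_location='cpu').to(device)
    features = extract_fn()                       # expensive, done only on the first run
    os.makedirs(os.path.dirname(cache_file), exist_ok=True)
    torch.save(features, cache_file)
    return features

# usage sketch: extract_fn would wrap the CLIP/ResNet feature extractor
feats = load_or_extract_features('features/lhq/clip_features/real_image_features.pt',
                                 extract_fn=lambda: torch.randn(1000, 512))
```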
### Links

-3. Clean FID - https://github.com/GaParmar/clean-fid/tree/main
-4. FID infinity, IS infinity - https://github.com/mchong6/FID_IS_infinity/tree/master
+1. ResNet50 pretrained on Places365 - https://github.com/CSAILVision/places365
+2. Clean FID - https://github.com/GaParmar/clean-fid/tree/main
+3. FID infinity, IS infinity - https://github.com/mchong6/FID_IS_infinity/tree/master
\ No newline at end of file
%% Cell type:code id: tags:

``` python
from trainer.train import *
from dataloader.load import *
-from models.Framework import *
-from models.all_unets import *
+from models.ConditionalDiffusionModel import *
+from models.conditional_unet import *
import torch
from torch import nn
```
%% Cell type:markdown id: tags:

# Prepare experiment
1. Choose Hyperparameter Settings
2. Run notebook on local machine to generate experiment folder with the JSON files containing the settings
3. scp experiment folder to the HPC
4. Run the pipeline by adding the following to the batch file:
    - Train Model: &emsp;&emsp;&emsp;&emsp;&emsp; `python main.py train "<absolute path of experiment folder in hpc>"`
    - Sample Images: &emsp;&emsp;&emsp; `python main.py sample "<absolute path of experiment folder in hpc>"`
    - Evaluate Model: &emsp;&emsp;&emsp; `python main.py evaluate "<absolute path of experiment folder in hpc>"`
%% Cell type:code id: tags:

``` python
import torch

####
# Settings
####

# Dataset path
-datapath = "/work/lect0100/lhq_256"
+datapath = "/home/wn455752/repo/evaluation/data/afhq"

# Experiment setup
-run_name = 'main_test1'                # WANDB and experiment folder Name!
+run_name = 'afhq_eval_ep_499_no_cfg'   # WANDB and experiment folder Name!
checkpoint = None #'model_epoch_8.pth' # Name of checkpoint pth file or None
-experiment_path = "/work/lect0100/main_experiment/" + run_name +'/'
+experiment_path = '/home/wn455752/repo/conditional-diffusion/experiments/' + run_name + '/'

# Path to save generated experiment folder on local machine
local_path ="experiments/" + run_name + '/settings'

# Diffusion Model Settings
diffusion_steps = 1000
image_size = 128
channels = 3

# Training
batchsize = 32
epochs = 100
store_iter = 10
eval_iter = 500
learning_rate = 0.0001
optimizername = "torch.optim.AdamW"
optimizer_params = None
verbose = False
# checkpoint = None #(If no checkpoint training, ie. random weights)

# Sampling
sample_size = 20
intermediate = False # True if you want to sample one image and all its intermediate latents
sample_all=False

# Evaluating
# class conditional
eval_realpath = '/home/wn455752/repo/evaluation/data/afhq' # path to real images (assumes the dir has two subdirs - train and test)
eval_genpath = '/home/wn455752/repo/evaluation/samples/afhq_samples/samples/epoch_499_no_cfg/epoch_500/sample_1' # path to sampled images
eval_size=image_size # resolution of training images
eval_arch='clip' # DO NOT CHANGE
eval_mode='both' # choose between 'kNN' and 'pairs' (for closest pairs) or 'both'
eval_k_kNN=3 # choose k for kNN
eval_sample=10 # in case of kNN, find kNN of first 'sample' number of generated samples
# in case of pairs, find top 'sample' number of closest pairs of
# real-generated images from the entire set of generated samples
eval_fid='yes' # whether to compute FID, IS scores (for class conditional)
eval_name_appendix='afhq_eval_ep_499_no_cfg' # name appendix
# inpainting
#eval_realpath = '/home/wn455752/repo/evaluation/samples/inpainting_samples/paint_lhq/samples/epoch_190/sample_12/raw/' # path to real images (assumes the dir has two subdirs - train and test)
#eval_genpath = '/home/wn455752/repo/evaluation/samples/inpainting_samples/paint_lhq/samples/epoch_190/sample_12/inpaint/' # path to sampled images
#eval_size=image_size # resolution of training images
#eval_arch='clip' # choose between 'clip' or 'cnn'
#eval_mode='pairs' # DO NOT CHANGE
#eval_k_kNN=3 # choose k for kNN
#eval_sample=20 # in case of kNN, find kNN of first 'sample' number of generated samples
# in case of pairs, find top 'sample' number of closest pairs of
# real-generated images from the entire set of generated samples
#eval_fid='yes' # whether to compute PSNR & SSIM
#eval_name_appendix='inpaint_eval_ep_190' # name appendix
###
# Advanced Settings Dictionaries
###

-meta_setting = dict(modelname = "UNet_Res",
-                    dataset = "UnconditionalDataset",
-                    framework = "DDPM",
-                    trainloop_function = "ddpm_trainer",
-                    sampling_function = 'ddpm_sampler',
-                    evaluation_function = 'ddpm_evaluator',
+meta_setting = dict(modelname = "Conditional_UNet_Res",
+                    dataset = "ConditionalDataset_AFHQ_Class",        # ConditionalDataset_AFHQ_Class or ConditionalDataset_LHQ_Paint
+                    framework = "CDM",
+                    trainloop_function = "cdm_trainer",
+                    sampling_function = 'cdm_sampler_afhq_class',     # cdm_sampler_afhq_class or cdm_sampler_lhq_paint
+                    evaluation_function = 'cdm_evaluator_afhq_class', # cdm_evaluator_afhq_class or cdm_evaluator_lhq_paint
                    batchsize = batchsize
                    )

dataset_setting = dict(fpath = datapath,
                       img_size = image_size,
                       frac =0.8,
                       skip_first_n = 0,
                       ext = ".png",
                       transform=True
                       )

model_setting = dict( n_channels=64,
                      fctr = [1,2,4,4,8],
                      time_dim=256,
                      attention = True,
                      )
"""
outdated
model_setting = dict( channels_in=channels,
channels_out =channels ,
activation='relu', # activation function. Options: {'relu', 'leakyrelu', 'selu', 'gelu', 'silu'/'swish'}
weight_init='he', # weight initialization. Options: {'he', 'torch'}
projection_features=64, # number of image features after first convolution layer
time_dim=batchsize, #dont chnage!!!
time_channels=diffusion_steps, # number of time channels #TODO same as diffusion steps?
num_stages=4, # number of stages in contracting/expansive path
stage_list=None, # specify number of features produced by stages
num_blocks=1, # number of ConvResBlock in each contracting/expansive path
num_groupnorm_groups=32, # number of groups used in Group Normalization inside a ConvResBlock
dropout=0.1, # drop-out to be applied inside a ConvResBlock
attention_list=None, # specify MHA pattern across stages
num_attention_heads=1,
)
"""
framework_setting = dict(
    diffusion_steps = diffusion_steps, # dont change!!
    out_shape = (channels,image_size,image_size), # dont change!!
    noise_schedule = 'linear',
    beta_1 = 1e-4,
    beta_T = 0.02,
    alpha_bar_lower_bound = 0.9,
    var_schedule = 'same',
    kl_loss = 'simplified',
    recon_loss = 'none',
    )

training_setting = dict(
    epochs = epochs,
    store_iter = store_iter,
    eval_iter = eval_iter,
    optimizer_class=optimizername,
    optimizer_params = optimizer_params,
    #optimizer_params=dict(lr=learning_rate), # don't change!
    learning_rate = learning_rate,
    run_name=run_name,
    checkpoint= checkpoint,
    experiment_path = experiment_path,
    verbose = verbose,
    T_max = 0.8*90000/32*100, # cosine lr param len(train_ds)/batchsize * total epochs to 0
    eta_min= 1e-10, # cosine lr param
    )

sampling_setting = dict(
    checkpoint = checkpoint,
    experiment_path = experiment_path,
    batch_size = sample_size,
    intermediate = intermediate,
    sample_all = sample_all
    )
# TODO
evaluation_setting = dict(
-    checkpoint = checkpoint,
-    experiment_path = experiment_path,
+    experiment_path=experiment_path,
+    realpath=eval_realpath,
    genpath=eval_genpath,
    size=eval_size,
    arch=eval_arch,
    mode=eval_mode,
    k=eval_k_kNN,
    sample=eval_sample,
    name_appendix=eval_name_appendix,
    fid=eval_fid
    )
```
%% Cell type:code id: tags:

``` python
import os
import json

f = local_path
if os.path.exists(f):
    print("path already exists, pick a new name!")
    print("break")
else:
    print("create folder")
    #os.mkdir(f)
    os.makedirs(f, exist_ok=True)
    print("folder created ")

with open(f+"/meta_setting.json","w+") as fp:
    json.dump(meta_setting,fp)

with open(f+"/dataset_setting.json","w+") as fp:
    json.dump(dataset_setting,fp)

with open(f+"/model_setting.json","w+") as fp:
    json.dump(model_setting,fp)

with open(f+"/framework_setting.json","w+") as fp:
    json.dump(framework_setting,fp)

with open(f+"/training_setting.json","w+") as fp:
    json.dump(training_setting,fp)

with open(f+"/sampling_setting.json","w+") as fp:
    json.dump(sampling_setting,fp)

with open(f+"/evaluation_setting.json","w+") as fp:
    json.dump(evaluation_setting,fp)

print("stored json files in folder")
print(meta_setting)
print(dataset_setting)
print(model_setting)
print(framework_setting)
print(training_setting)
print(sampling_setting)
print(evaluation_setting)
```
%% Output

create folder
folder created 
stored json files in folder
-{'modelname': 'UNet_Res', 'dataset': 'UnconditionalDataset', 'framework': 'DDPM', 'trainloop_function': 'ddpm_trainer', 'sampling_function': 'ddpm_sampler', 'evaluation_function': 'ddpm_evaluator', 'batchsize': 32}
-{'fpath': '/work/lect0100/lhq_256', 'img_size': 128, 'frac': 0.8, 'skip_first_n': 0, 'ext': '.png', 'transform': True}
+{'modelname': 'UNet_Res', 'dataset': 'UnconditionalDataset', 'framework': 'DDPM', 'trainloop_function': 'ddpm_trainer', 'sampling_function': 'ddpm_sampler', 'evaluation_function': 'cdm_evaluator', 'batchsize': 32}
+{'fpath': '/home/wn455752/repo/evaluation/data/afhq', 'img_size': 128, 'frac': 0.8, 'skip_first_n': 0, 'ext': '.png', 'transform': True}
{'n_channels': 64, 'fctr': [1, 2, 4, 4, 8], 'time_dim': 256, 'attention': True}
{'diffusion_steps': 1000, 'out_shape': (3, 128, 128), 'noise_schedule': 'linear', 'beta_1': 0.0001, 'beta_T': 0.02, 'alpha_bar_lower_bound': 0.9, 'var_schedule': 'same', 'kl_loss': 'simplified', 'recon_loss': 'none'}
-{'epochs': 100, 'store_iter': 10, 'eval_iter': 500, 'optimizer_class': 'torch.optim.AdamW', 'optimizer_params': None, 'learning_rate': 0.0001, 'run_name': 'main_test1', 'checkpoint': None, 'experiment_path': '/work/lect0100/main_experiment/main_test1/', 'verbose': False, 'T_max': 225000.0, 'eta_min': 1e-10}
-{'checkpoint': None, 'experiment_path': '/work/lect0100/main_experiment/main_test1/', 'batch_size': 20, 'intermediate': False}
-{'checkpoint': None, 'experiment_path': '/work/lect0100/main_experiment/main_test1/'}
+{'epochs': 100, 'store_iter': 10, 'eval_iter': 500, 'optimizer_class': 'torch.optim.AdamW', 'optimizer_params': None, 'learning_rate': 0.0001, 'run_name': 'afhq_eval_ep_499_no_cfg', 'checkpoint': None, 'experiment_path': '/Users/roy/Desktop/Workspace/RWTH/SoSe 2023/Deep Learning Lab/DLL_vsc/conditional-diffusion/experiments/afhq_eval_ep_499_no_cfg/', 'verbose': False, 'T_max': 225000.0, 'eta_min': 1e-10}
+{'checkpoint': None, 'experiment_path': '/Users/roy/Desktop/Workspace/RWTH/SoSe 2023/Deep Learning Lab/DLL_vsc/conditional-diffusion/experiments/afhq_eval_ep_499_no_cfg/', 'batch_size': 20, 'intermediate': False, 'sample_all': False}
+{'experiment_path': '/Users/roy/Desktop/Workspace/RWTH/SoSe 2023/Deep Learning Lab/DLL_vsc/conditional-diffusion/experiments/afhq_eval_ep_499_no_cfg/', 'realpath': '/home/wn455752/repo/evaluation/data/afhq', 'genpath': '/home/wn455752/repo/evaluation/samples/afhq_samples/samples/epoch_499_no_cfg/epoch_500/sample_1', 'size': 128, 'arch': 'clip', 'mode': 'both', 'k': 3, 'sample': 10, 'name_appendix': 'afhq_eval_ep_499_no_cfg', 'fid': 'yes'}

%% Cell type:code id: tags:

``` python
```