Skip to content
GitLab
Explore
Sign in
Primary navigation
Search or go to…
Project
Unconditional Diffusion
Manage
Activity
Members
Code
Repository
Branches
Commits
Tags
Repository graph
Compare revisions
Locked files
Deploy
Model registry
Analyze
Model experiments
Help
Help
Support
GitLab documentation
Compare GitLab plans
GitLab community forum
Contribute to GitLab
Provide feedback
Terms and privacy
Keyboard shortcuts
?
Snippets
Groups
Projects
Show more breadcrumbs
Diffusion Project
Unconditional Diffusion
Commits
81a33135
Commit
81a33135
authored
2 years ago
by
Tobias Seibel
Browse files
Options
Downloads
Patches
Plain Diff
save raw tensor, normalize images, histogram
parent
af3e110d
No related branches found
No related tags found
No related merge requests found
Changes
3
Expand all
Show whitespace changes
Inline
Side-by-side
Showing
3 changed files
evaluation/sample.py
+7
-0
7 additions, 0 deletions
evaluation/sample.py
experiment_creator.ipynb
+7
-7
7 additions, 7 deletions
experiment_creator.ipynb
playground.ipynb
+249
-0
249 additions, 0 deletions
playground.ipynb
with
263 additions
and
7 deletions
evaluation/sample.py
+
7
−
0
View file @
81a33135
...
...
@@ -62,6 +62,13 @@ def ddpm_sampler(model, checkpoint, experiment_path, device, intermediate=False,
generated
=
model
.
sample
(
batch_size
=
batch_size
)
name
=
'
sample
'
#store the raw generated images within the tensor
torch
.
save
(
generated
,
os
.
path
.
join
(
sample_dir
,
f
"
image_tensor
{
j
}
"
))
#normalize to (-1,1)
a
=
generated
.
min
()
b
=
generated
.
max
()
A
,
B
=-
1
,
1
generated
=
(
generated
-
a
)
/
(
b
-
a
)
*
(
B
-
A
)
+
A
# save generated images
for
i
in
range
(
generated
.
size
(
0
)):
image
=
back2pil
(
generated
[
i
])
...
...
This diff is collapsed.
Click to expand it.
experiment_creator.ipynb
+
7
−
7
View file @
81a33135
...
...
@@ -2,7 +2,7 @@
"cells": [
{
"cell_type": "code",
"execution_count": 1
1
,
"execution_count": 1
4
,
"metadata": {
"scrolled": true
},
...
...
@@ -33,7 +33,7 @@
},
{
"cell_type": "code",
"execution_count": 1
2
,
"execution_count": 1
5
,
"metadata": {},
"outputs": [],
"source": [
...
...
@@ -164,7 +164,7 @@
},
{
"cell_type": "code",
"execution_count": 1
3
,
"execution_count": 1
6
,
"metadata": {},
"outputs": [
{
...
...
@@ -176,11 +176,11 @@
"stored json files in folder\n",
"{'modelname': 'UNet_Res', 'dataset': 'UnconditionalDataset', 'framework': 'DDPM', 'trainloop_function': 'ddpm_trainer', 'sampling_function': 'ddpm_sampler', 'evaluation_function': 'ddpm_evaluator', 'batchsize': 32}\n",
"{'fpath': '/work/lect0100/lhq_256', 'img_size': 128, 'frac': 0.8, 'skip_first_n': 0, 'ext': '.png', 'transform': True}\n",
"{'n_channels': 64, 'fctr': [1, 2, 4, 4, 8], 'time_dim': 256}\n",
"{'n_channels': 64, 'fctr': [1, 2, 4, 4, 8], 'time_dim': 256
, 'attention': True
}\n",
"{'diffusion_steps': 500, 'out_shape': (3, 128, 128), 'noise_schedule': 'linear', 'beta_1': 0.0001, 'beta_T': 0.02, 'alpha_bar_lower_bound': 0.9, 'var_schedule': 'same', 'kl_loss': 'simplified', 'recon_loss': 'nll'}\n",
"{'epochs':
1
0, 'store_iter':
2
, 'eval_iter': 2, 'optimizer_class': 'torch.optim.AdamW', 'optimizer_params': None, 'learning_rate': 0.0001, 'run_name': 'main_test
ing
', 'checkpoint': None, 'experiment_path': '/work/lect0100/
tobi/main_tes
t/main_test
ing
/', 'verbose': True, 'T_max':
900000
0, 'eta_min': 1e-10}\n",
"{'checkpoint': None, 'experiment_path': '/work/lect0100/
tobi/main_tes
t/main_test
ing
/', 'batch_size':
1
0, 'intermediate': False}\n",
"{'checkpoint': None, 'experiment_path': '/work/lect0100/
tobi/main_tes
t/main_test
ing
/'}\n"
"{'epochs':
2
0, 'store_iter':
5
, 'eval_iter': 2, 'optimizer_class': 'torch.optim.AdamW', 'optimizer_params': None, 'learning_rate': 0.0001, 'run_name': 'main_test
0
', 'checkpoint': None, 'experiment_path': '/work/lect0100/
main_experimen
t/main_test
0
/', 'verbose': True, 'T_max':
337500.
0, 'eta_min': 1e-10}\n",
"{'checkpoint': None, 'experiment_path': '/work/lect0100/
main_experimen
t/main_test
0
/', 'batch_size':
2
0, 'intermediate': False}\n",
"{'checkpoint': None, 'experiment_path': '/work/lect0100/
main_experimen
t/main_test
0
/'}\n"
]
}
],
...
...
%% Cell type:code id: tags:
```
python
from
trainer.train
import
*
from
dataloader.load
import
*
from
models.Framework
import
*
from
models.all_unets
import
*
import
torch
from
torch
import
nn
```
%% Cell type:markdown id: tags:
# Prepare experiment
1.
Choose Hyperparameter Settings
2.
Run notebook on local machine to generate experiment folder with the JSON files containing the settings
3.
scp experiment folder to the HPC
4.
Run Pipeline by adding the following to the batch file:
-
Train Model:
     
`python main.py train "<absolute path of experiment folder in hpc>"`
-
Sample Images:
   
`python main.py sample "<absolute path of experiment folder in hpc>"`
-
Evaluate Model:
   
`python main.py evaluate "<absolute path of experiment folder in hpc>"`
%% Cell type:code id: tags:
```
python
import
torch
####
# Settings
####
# Dataset path
datapath
=
"
/work/lect0100/lhq_256
"
# Experiment setup
run_name
=
'
main_test0
'
# WANDB and experiment folder Name!
checkpoint
=
None
#'model_epoch_8.pth' # Name of checkpoint pth file or None
experiment_path
=
"
/work/lect0100/main_experiment/
"
+
run_name
+
'
/
'
# Path to save generated experiment folder on local machine
local_path
=
"
experiments/
"
+
run_name
+
'
/settings
'
# Diffusion Model Settings
diffusion_steps
=
500
image_size
=
128
channels
=
3
# Training
batchsize
=
32
epochs
=
20
store_iter
=
5
eval_iter
=
2
learning_rate
=
0.0001
optimizername
=
"
torch.optim.AdamW
"
optimizer_params
=
None
verbose
=
True
# checkpoint = None #(If no checkpoint training, ie. random weights)
# Sampling
sample_size
=
20
intermediate
=
False
# True if you want to sample one image and all ist intermediate latents
# Evaluating
...
###
# Advanced Settings Dictionaries
###
meta_setting
=
dict
(
modelname
=
"
UNet_Res
"
,
dataset
=
"
UnconditionalDataset
"
,
framework
=
"
DDPM
"
,
trainloop_function
=
"
ddpm_trainer
"
,
sampling_function
=
'
ddpm_sampler
'
,
evaluation_function
=
'
ddpm_evaluator
'
,
batchsize
=
batchsize
)
dataset_setting
=
dict
(
fpath
=
datapath
,
img_size
=
image_size
,
frac
=
0.8
,
skip_first_n
=
0
,
ext
=
"
.png
"
,
transform
=
True
)
model_setting
=
dict
(
n_channels
=
64
,
fctr
=
[
1
,
2
,
4
,
4
,
8
],
time_dim
=
256
,
attention
=
True
,
)
"""
outdated
model_setting = dict( channels_in=channels,
channels_out =channels ,
activation=
'
relu
'
, # activation function. Options: {
'
relu
'
,
'
leakyrelu
'
,
'
selu
'
,
'
gelu
'
,
'
silu
'
/
'
swish
'
}
weight_init=
'
he
'
, # weight initialization. Options: {
'
he
'
,
'
torch
'
}
projection_features=64, # number of image features after first convolution layer
time_dim=batchsize, #dont chnage!!!
time_channels=diffusion_steps, # number of time channels #TODO same as diffusion steps?
num_stages=4, # number of stages in contracting/expansive path
stage_list=None, # specify number of features produced by stages
num_blocks=1, # number of ConvResBlock in each contracting/expansive path
num_groupnorm_groups=32, # number of groups used in Group Normalization inside a ConvResBlock
dropout=0.1, # drop-out to be applied inside a ConvResBlock
attention_list=None, # specify MHA pattern across stages
num_attention_heads=1,
)
"""
framework_setting
=
dict
(
diffusion_steps
=
diffusion_steps
,
# dont change!!
out_shape
=
(
channels
,
image_size
,
image_size
),
# dont change!!
noise_schedule
=
'
linear
'
,
beta_1
=
1e-4
,
beta_T
=
0.02
,
alpha_bar_lower_bound
=
0.9
,
var_schedule
=
'
same
'
,
kl_loss
=
'
simplified
'
,
recon_loss
=
'
nll
'
,
)
training_setting
=
dict
(
epochs
=
epochs
,
store_iter
=
store_iter
,
eval_iter
=
eval_iter
,
optimizer_class
=
optimizername
,
optimizer_params
=
optimizer_params
,
#optimizer_params=dict(lr=learning_rate), # don't change!
learning_rate
=
learning_rate
,
run_name
=
run_name
,
checkpoint
=
checkpoint
,
experiment_path
=
experiment_path
,
verbose
=
verbose
,
T_max
=
0.8
*
90000
/
32
*
150
,
# cosine lr param len(train_ds)/batchsize * total epochs to 0
eta_min
=
1e-10
,
# cosine lr param
)
sampling_setting
=
dict
(
checkpoint
=
checkpoint
,
experiment_path
=
experiment_path
,
batch_size
=
sample_size
,
intermediate
=
intermediate
)
# TODO
evaluation_setting
=
dict
(
checkpoint
=
checkpoint
,
experiment_path
=
experiment_path
,
)
```
%% Cell type:code id: tags:
```
python
import
os
import
json
f
=
local_path
if
os
.
path
.
exists
(
f
):
print
(
"
path already exists, pick a new name!
"
)
print
(
"
break
"
)
else
:
print
(
"
create folder
"
)
#os.mkdir(f)
os
.
makedirs
(
f
,
exist_ok
=
True
)
print
(
"
folder created
"
)
with
open
(
f
+
"
/meta_setting.json
"
,
"
w+
"
)
as
fp
:
json
.
dump
(
meta_setting
,
fp
)
with
open
(
f
+
"
/dataset_setting.json
"
,
"
w+
"
)
as
fp
:
json
.
dump
(
dataset_setting
,
fp
)
with
open
(
f
+
"
/model_setting.json
"
,
"
w+
"
)
as
fp
:
json
.
dump
(
model_setting
,
fp
)
with
open
(
f
+
"
/framework_setting.json
"
,
"
w+
"
)
as
fp
:
json
.
dump
(
framework_setting
,
fp
)
with
open
(
f
+
"
/training_setting.json
"
,
"
w+
"
)
as
fp
:
json
.
dump
(
training_setting
,
fp
)
with
open
(
f
+
"
/sampling_setting.json
"
,
"
w+
"
)
as
fp
:
json
.
dump
(
sampling_setting
,
fp
)
with
open
(
f
+
"
/evaluation_setting.json
"
,
"
w+
"
)
as
fp
:
json
.
dump
(
evaluation_setting
,
fp
)
print
(
"
stored json files in folder
"
)
print
(
meta_setting
)
print
(
dataset_setting
)
print
(
model_setting
)
print
(
framework_setting
)
print
(
training_setting
)
print
(
sampling_setting
)
print
(
evaluation_setting
)
```
%% Output
create folder
folder created
stored json files in folder
{'modelname': 'UNet_Res', 'dataset': 'UnconditionalDataset', 'framework': 'DDPM', 'trainloop_function': 'ddpm_trainer', 'sampling_function': 'ddpm_sampler', 'evaluation_function': 'ddpm_evaluator', 'batchsize': 32}
{'fpath': '/work/lect0100/lhq_256', 'img_size': 128, 'frac': 0.8, 'skip_first_n': 0, 'ext': '.png', 'transform': True}
{'n_channels': 64, 'fctr': [1, 2, 4, 4, 8], 'time_dim': 256}
{'n_channels': 64, 'fctr': [1, 2, 4, 4, 8], 'time_dim': 256
, 'attention': True
}
{'diffusion_steps': 500, 'out_shape': (3, 128, 128), 'noise_schedule': 'linear', 'beta_1': 0.0001, 'beta_T': 0.02, 'alpha_bar_lower_bound': 0.9, 'var_schedule': 'same', 'kl_loss': 'simplified', 'recon_loss': 'nll'}
{'epochs':
1
0, 'store_iter':
2
, 'eval_iter': 2, 'optimizer_class': 'torch.optim.AdamW', 'optimizer_params': None, 'learning_rate': 0.0001, 'run_name': 'main_test
ing
', 'checkpoint': None, 'experiment_path': '/work/lect0100/
tobi/main_tes
t/main_test
ing
/', 'verbose': True, 'T_max':
900000
0, 'eta_min': 1e-10}
{'checkpoint': None, 'experiment_path': '/work/lect0100/
tobi/main_tes
t/main_test
ing
/', 'batch_size':
1
0, 'intermediate': False}
{'checkpoint': None, 'experiment_path': '/work/lect0100/
tobi/main_tes
t/main_test
ing
/'}
{'epochs':
2
0, 'store_iter':
5
, 'eval_iter': 2, 'optimizer_class': 'torch.optim.AdamW', 'optimizer_params': None, 'learning_rate': 0.0001, 'run_name': 'main_test
0
', 'checkpoint': None, 'experiment_path': '/work/lect0100/
main_experimen
t/main_test
0
/', 'verbose': True, 'T_max':
337500.
0, 'eta_min': 1e-10}
{'checkpoint': None, 'experiment_path': '/work/lect0100/
main_experimen
t/main_test
0
/', 'batch_size':
2
0, 'intermediate': False}
{'checkpoint': None, 'experiment_path': '/work/lect0100/
main_experimen
t/main_test
0
/'}
%% Cell type:code id: tags:
```
python
```
...
...
This diff is collapsed.
Click to expand it.
playground.ipynb
+
249
−
0
View file @
81a33135
This diff is collapsed.
Click to expand it.
Preview
0%
Loading
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Save comment
Cancel
Please
register
or
sign in
to comment