Skip to content
GitLab
Explore
Sign in
Primary navigation
Search or go to…
Project
Unconditional Diffusion
Manage
Activity
Members
Code
Repository
Branches
Commits
Tags
Repository graph
Compare revisions
Locked files
Deploy
Model registry
Analyze
Model experiments
Help
Help
Support
GitLab documentation
Compare GitLab plans
GitLab community forum
Contribute to GitLab
Provide feedback
Terms and privacy
Keyboard shortcuts
?
Snippets
Groups
Projects
Show more breadcrumbs
Diffusion Project
Unconditional Diffusion
Commits
25d26f13
Commit
25d26f13
authored
1 year ago
by
Gonzalo Martin Garcia
Browse files
Options
Downloads
Patches
Plain Diff
add .ipynb_checkpoints to .gitignore
parent
393e6d27
No related branches found
No related tags found
No related merge requests found
Changes
2
Show whitespace changes
Inline
Side-by-side
Showing
2 changed files
.gitignore
+2
-1
2 additions, 1 deletion
.gitignore
.ipynb_checkpoints/experiment_creator-checkpoint.ipynb
+0
-189
0 additions, 189 deletions
.ipynb_checkpoints/experiment_creator-checkpoint.ipynb
with
2 additions
and
190 deletions
.gitignore
+
2
−
1
View file @
25d26f13
...
...
@@ -3,4 +3,5 @@
*/trained_ddpm
root
experiments
.ipynb_checkpoints
trainer/__pycache__
This diff is collapsed.
Click to expand it.
.ipynb_checkpoints/experiment_creator-checkpoint.ipynb
deleted
100644 → 0
+
0
−
189
View file @
393e6d27
{
"cells": [
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"from trainer.train import *\n",
"from dataloader.load import *\n",
"from models.Framework import *\n",
"from models.unet_unconditional_diffusion import *\n",
"import torch \n",
"from torch import nn \n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Prepare experiment\n",
"1. Adapt settings below (for data path, only use absolute paths!!)\n",
"2. run both cells of the notebook, this creates a folder containing the json setting files \n",
"3. put the folder on the HPC\n",
"4. the following command starts the training `python main.py train \"<absolute path of folder in hpc>\"` add it to the batch file "
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [],
"source": [
"import torch \n",
"\n",
"#path to store, path to load data , path to checkpoint \n",
"\n",
"#basic settings:\n",
"learning_rate = 0.0001\n",
"batchsize = 8\n",
"datapath = \"/work/lect0100/lhq_256\"\n",
"checkpoint_path = None #when training from checkpoint\n",
"experimentname = \"/Users/gonzalo/Desktop/testing/\" + \"test1\" #always change experiment name! \n",
"epochs = 20\n",
"diffusion_steps = 25\n",
"image_size = 64\n",
"channels = 3\n",
"store_iter = 5\n",
"optimizername = \"torch.optim.AdamW\"\n",
"name_appendix = 'DM_bottleneck'# id for WANDB\n",
"\n",
"#advanced settings: change directly in dictionary \n",
"meta_setting = dict(modelname = \"UNet_Unconditional_Diffusion_Bottleneck_Variant\",\n",
" dataset = \"UnconditionalDataset\",\n",
" framework = \"DDPM\",\n",
" trainloop_function = \"ddpm_trainer\",\n",
" batchsize = batchsize,\n",
" )\n",
"\n",
"\n",
"dataset_setting = dict(fpath = datapath,\n",
" img_size = image_size,\n",
" frac =0.8,\n",
" skip_first_n = 0,\n",
" ext = \".png\",\n",
" transform=True\n",
" )\n",
"\n",
"\n",
"model_setting = dict( channels_in=channels, \n",
" channels_out =channels , \n",
" activation='relu', # activation function. Options: {'relu', 'leakyrelu', 'selu', 'gelu', 'silu'/'swish'}\n",
" weight_init='he', # weight initialization. Options: {'he', 'torch'}\n",
" projection_features=64, # number of image features after first convolution layer\n",
" time_dim=batchsize, #dont change!!!\n",
" time_channels=diffusion_steps, # number of time channels #TODO same as diffusion steps? \n",
" num_stages=4, # number of stages in contracting/expansive path\n",
" stage_list=None, # specify number of features produced by stages\n",
" num_blocks=1, # number of ConvResBlock in each contracting/expansive path\n",
" num_groupnorm_groups=32, # number of groups used in Group Normalization inside a ConvResBlock\n",
" dropout=0.1, # drop-out to be applied inside a ConvResBlock\n",
" attention_list=None, # specify MHA pattern across stages\n",
" num_attention_heads=1,\n",
" )\n",
"\n",
"\n",
"framework_setting = dict(\n",
" diffusion_steps = diffusion_steps, # dont change!!\n",
" out_shape = (channels,image_size,image_size), # dont change!!\n",
" noise_schedule = 'linear', \n",
" beta_1 = 1e-4, \n",
" beta_T = 0.02,\n",
" alpha_bar_lower_bound = 0.9,\n",
" var_schedule = 'same', \n",
" kl_loss = 'simplified', \n",
" recon_loss = 'none',\n",
" \n",
" )\n",
"\n",
"\n",
"training_setting = dict(\n",
" epochs = epochs,\n",
" store_iter = store_iter,\n",
" eval_iter = 3,\n",
" optimizer_class=optimizername, \n",
" optimizer_params=dict(lr=learning_rate), # don't change!\n",
" scheduler_class= None, \n",
" scheduler_params=None,\n",
" last_epoch=-1,\n",
" learning_rate = learning_rate,\n",
" lr_schedule = False,\n",
" verbose = True,\n",
" name_appendix=name_appendix,\n",
" checkpoint_path= checkpoint_path,\n",
" )"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"path already exists, pick a new name!\n",
"break\n"
]
}
],
"source": [
"import os\n",
"import json\n",
"f = experimentname\n",
"if os.path.exists(f):\n",
" print(\"path already exists, pick a new name!\")\n",
" print(\"break\")\n",
"else:\n",
" print(\"create folder\")\n",
" os.mkdir(f)\n",
" print(\"folder created \")\n",
" with open(f+\"/meta_setting.json\",\"w+\") as fp:\n",
" json.dump(meta_setting,fp)\n",
"\n",
" with open(f+\"/dataset_setting.json\",\"w+\") as fp:\n",
" json.dump(dataset_setting,fp)\n",
" \n",
" with open(f+\"/model_setting.json\",\"w+\") as fp:\n",
" json.dump(model_setting,fp)\n",
" \n",
" with open(f+\"/framework_setting.json\",\"w+\") as fp:\n",
" json.dump(framework_setting,fp)\n",
"\n",
" with open(f+\"/training_setting.json\",\"w+\") as fp:\n",
" json.dump(training_setting,fp)\n",
"\n",
" print(\"stored json files in folder\")\n",
" print(meta_setting)\n",
" print(dataset_setting)\n",
" print(model_setting)\n",
" print(framework_setting)\n",
" print(training_setting)\n",
" "
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3.9 (pytorch)",
"language": "python",
"name": "pytorch"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.16"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
%% Cell type:code id: tags:
```python
from trainer.train import *
from dataloader.load import *
from models.Framework import *
from models.unet_unconditional_diffusion import *
import torch
from torch import nn
```
%% Cell type:markdown id: tags:
# Prepare experiment
1. Adapt settings below (for data path, only use absolute paths!!)
2. run both cells of the notebook, this creates a folder containing the json setting files
3. put the folder on the HPC
4. the following command starts the training `python main.py train "<absolute path of folder in hpc>"` add it to the batch file
%% Cell type:code id: tags:
```python
import torch

#path to store, path to load data , path to checkpoint

#basic settings:
learning_rate = 0.0001
batchsize = 8
datapath = "/work/lect0100/lhq_256"
checkpoint_path = None #when training from checkpoint
experimentname = "/Users/gonzalo/Desktop/testing/" + "test1" #always change experiment name!
epochs = 20
diffusion_steps = 25
image_size = 64
channels = 3
store_iter = 5
optimizername = "torch.optim.AdamW"
name_appendix = 'DM_bottleneck'# id for WANDB

#advanced settings: change directly in dictionary
meta_setting = dict(modelname = "UNet_Unconditional_Diffusion_Bottleneck_Variant",
                    dataset = "UnconditionalDataset",
                    framework = "DDPM",
                    trainloop_function = "ddpm_trainer",
                    batchsize = batchsize,
                    )


dataset_setting = dict(fpath = datapath,
                       img_size = image_size,
                       frac =0.8,
                       skip_first_n = 0,
                       ext = ".png",
                       transform=True
                       )


model_setting = dict( channels_in=channels,
                      channels_out =channels ,
                      activation='relu', # activation function. Options: {'relu', 'leakyrelu', 'selu', 'gelu', 'silu'/'swish'}
                      weight_init='he', # weight initialization. Options: {'he', 'torch'}
                      projection_features=64, # number of image features after first convolution layer
                      time_dim=batchsize, #dont change!!!
                      time_channels=diffusion_steps, # number of time channels #TODO same as diffusion steps?
                      num_stages=4, # number of stages in contracting/expansive path
                      stage_list=None, # specify number of features produced by stages
                      num_blocks=1, # number of ConvResBlock in each contracting/expansive path
                      num_groupnorm_groups=32, # number of groups used in Group Normalization inside a ConvResBlock
                      dropout=0.1, # drop-out to be applied inside a ConvResBlock
                      attention_list=None, # specify MHA pattern across stages
                      num_attention_heads=1,
                      )


framework_setting = dict(
                         diffusion_steps = diffusion_steps, # dont change!!
                         out_shape = (channels,image_size,image_size), # dont change!!
                         noise_schedule = 'linear',
                         beta_1 = 1e-4,
                         beta_T = 0.02,
                         alpha_bar_lower_bound = 0.9,
                         var_schedule = 'same',
                         kl_loss = 'simplified',
                         recon_loss = 'none',

                         )


training_setting = dict(
                        epochs = epochs,
                        store_iter = store_iter,
                        eval_iter = 3,
                        optimizer_class=optimizername,
                        optimizer_params=dict(lr=learning_rate), # don't change!
                        scheduler_class= None,
                        scheduler_params=None,
                        last_epoch=-1,
                        learning_rate = learning_rate,
                        lr_schedule = False,
                        verbose = True,
                        name_appendix=name_appendix,
                        checkpoint_path= checkpoint_path,
                        )
```
%% Cell type:code id: tags:
```python
import os
import json
f = experimentname
if os.path.exists(f):
    print("path already exists, pick a new name!")
    print("break")
else:
    print("create folder")
    os.mkdir(f)
    print("folder created ")
    with open(f+"/meta_setting.json","w+") as fp:
        json.dump(meta_setting,fp)

    with open(f+"/dataset_setting.json","w+") as fp:
        json.dump(dataset_setting,fp)

    with open(f+"/model_setting.json","w+") as fp:
        json.dump(model_setting,fp)

    with open(f+"/framework_setting.json","w+") as fp:
        json.dump(framework_setting,fp)

    with open(f+"/training_setting.json","w+") as fp:
        json.dump(training_setting,fp)

    print("stored json files in folder")
    print(meta_setting)
    print(dataset_setting)
    print(model_setting)
    print(framework_setting)
    print(training_setting)
```
%% Output
path already exists, pick a new name!
break
This diff is collapsed.
Click to expand it.
Preview
0%
Loading
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Save comment
Cancel
Please
register
or
sign in
to comment