diff --git a/evaluation/evaluate.py b/evaluation/evaluate.py
index 2df0f2dd8c487be8eed97562a9410bf43c9a775d..727985e722a42481f6ba80d316d1a4608b54218d 100644
--- a/evaluation/evaluate.py
+++ b/evaluation/evaluate.py
@@ -12,8 +12,9 @@ from evaluation.helpers.kNN import *
 from evaluation.helpers.metrics import *
 
 
-def cdm_evaluator(experiment_path, realpath, genpath, size=128, arch='clip', mode='both', k=3, sample=10, name_appendix='', fid='no'):
+def cdm_evaluator_afhq_class(experiment_path, realpath, genpath, size=128, arch='clip', mode='both', k=3, sample=10, name_appendix='', fid='no'):
     
+    #device = "mps" if torch.backends.mps.is_available() else "cpu" 
     device = "cuda" if torch.cuda.is_available() else "cpu"
 
     print('device:', device)
@@ -174,3 +175,158 @@ def cdm_evaluator(experiment_path, realpath, genpath, size=128, arch='clip', mod
                         size=size,
                         name_appendix=name_appendix)
     print('Finish!')
+
+
+def cdm_evaluator_lhq_paint(experiment_path, realpath, genpath, size=128, arch='clip', mode='kNN', k=3, sample=10, name_appendix='', fid='no'):
+    
+    #device = "mps" if torch.backends.mps.is_available() else "cpu" 
+    device = "cuda" if torch.cuda.is_available() else "cpu"
+    
+    print('device:', device)
+
+    path_to_real_images = realpath                          # path to real images (assumes that there are 2 subdirectories - train and test)
+    path_to_generated_images = genpath                      # path to generated samples
+    size = size                                             # image resolution
+    arch = arch                                             # architecture to extract features - choose between 'cnn' and 'clip'
+    mode = mode                                             # qualitative eval mode - 'kNN' 
+    k_kNN = k                                               # value of k if mode=='kNN'
+    sample = sample                                         # for kNN, find kNNs of first 'sample' samples in the dir; for pairs, find top 'sample' closest ones
+    name_appendix = name_appendix                           # name appendix for evaluation files
+    fid_bool = fid                                          # whether to compute PSNR and SSIM
+    
+    print('Start')
+    
+    # create the output folder (experiment_path/eval_output) if it does not exist
+    output_path = Path(os.path.join(experiment_path, 'eval_output'))
+    if not output_path.is_dir():
+        os.mkdir(output_path)
+
+    # change working directory to the output folder
+    os.chdir(output_path)
+    # create output text file, store evaluation metadata
+    txt_filename = 'evaluation_' + arch + '_' + mode + '-' + name_appendix + '.txt'
+    with open(txt_filename, 'w') as f:
+        f.write(f'Path to real images: {path_to_real_images}\n')
+        f.write(f'Path to generated images: {path_to_generated_images}\n')
+        f.write(f'Experiment on LHQ dataset with images of resolution {size}x{size}\n')
+        f.write(f'Using {arch} model to extract features\n')
+        f.write(f'Plot of {mode} on {sample} samples\n')
+        f.write(f'Quantitative metrics computed: {fid_bool}\n')
+        
+    
+    # datapaths
+    path_to_training_images = path_to_real_images
+    
+
+    # compute quantitative metrics (PSNR and SSIM)
+    if fid_bool == 'yes':
+        
+        # Content-invariant metrics
+        train_images = image_to_tensor(path_to_training_images, device=device)
+        generated = image_to_tensor(path_to_generated_images, device=device)
+
+        print('Computing PSNR and SSIM scores...')
+        psnr_score, ssim_score = compute_ssim_psnr_scores(train_images, generated, device)
+        with open(txt_filename, 'a') as f:
+            f.write(f'PSNR score: {psnr_score}\n')
+            f.write(f'SSIM score: {ssim_score}\n')
+        
+    
+    print(f'Loading model {arch}...')   
+    feature_flag = False                # set to True once cached training-set features are loaded from disk
+    
+    # Feature extractor setup (for qualitative evaluations)
+    
+    # directory with cached features of the real (training) images
+    pth = '/home/wn455752/repo/evaluation/features/lhq'
+    # load pretrained ResNet50
+    if arch == 'cnn':
+        print('Loading pretrained ResNet50...')
+        path_to_pretrained_weights = '/home/wn455752/repo/evaluation/pretrained/resnet50_places365_pretrained/resnet50_places365_weights.pth'
+        weights = torch.load(path_to_pretrained_weights)
+        model = resnet50().to(device)
+        model.load_state_dict(weights)
+        transform = transforms.Compose([transforms.ToTensor(),                       # transform PIL.Image to torch.Tensor
+                                        transforms.Lambda(lambda x: x * 255)])       # scale values to the [0, 255] range expected by the pretrained ResNet50
+        model.eval()    # switch to evaluation mode (no dropout / batch-norm updates)
+        print('Checking for existing training dataset features...')
+        # check for saved dataset features
+        resnet_pth = Path(os.path.join(str(pth), 'resnet_features'))
+        if resnet_pth.is_dir():
+            name_pth = Path(os.path.join(str(pth), 'resnet_features/real_name_list'))
+            feature_pth = Path(os.path.join(str(pth), 'resnet_features/real_image_features.pt'))
+            # reuse the cache only if both the name list and the feature tensor exist
+            if name_pth.is_file() and feature_pth.is_file():
+                with open(name_pth, 'rb') as fp:
+                    real_names = pickle.load(fp)
+                print('Loading existing training dataset features...')
+                real_features = torch.load(feature_pth, map_location="cpu")
+                real_features = real_features.to(device)
+                feature_flag = True
+        else:
+            os.makedirs(os.path.join(str(pth), 'resnet_features'), exist_ok=True)
+            name_pth = Path(os.path.join(str(pth), 'resnet_features/real_name_list'))
+            feature_pth = Path(os.path.join(str(pth), 'resnet_features/real_image_features.pt'))
+    
+    # load CLIP
+    elif arch == 'clip':
+        print('Loading pretrained CLIP...')
+        model, transform = clip.load("ViT-B/32", device=device)
+        # check for saved dataset features
+        print('Checking for existing training dataset features...')
+        clip_pth = Path(os.path.join(str(pth), 'clip_features'))
+        if clip_pth.is_dir():    
+            name_pth = Path(os.path.join(str(pth), 'clip_features/real_name_list'))
+            feature_pth = Path(os.path.join(str(pth), 'clip_features/real_image_features.pt'))
+            # reuse the cache only if both the name list and the feature tensor exist
+            if name_pth.is_file() and feature_pth.is_file():
+                with open(name_pth, 'rb') as fp:
+                    real_names = pickle.load(fp)
+                print('Loading existing training dataset features...')
+                real_features = torch.load(feature_pth, map_location="cpu")
+                real_features = real_features.to(device)
+                feature_flag = True
+        else:
+            os.makedirs(os.path.join(str(pth), 'clip_features'), exist_ok=True)
+            name_pth = Path(os.path.join(str(pth), 'clip_features/real_name_list'))
+            feature_pth = Path(os.path.join(str(pth), 'clip_features/real_image_features.pt'))
+
+
+    # Qualitative Evaluations
+
+    knn = kNN()
+    # collect images from directories and store in a tensor
+    if not feature_flag:
+        print('Collecting training images...')
+        real_names, real_tensor = knn.get_images(path_to_training_images, transform)
+        with open(name_pth, 'wb') as fp:
+            pickle.dump(real_names, fp)
+    print('Collecting generated images...')
+    generated_names, generated_tensor = knn.get_images(path_to_generated_images, transform)
+
+    # extract features from image tensors
+    if not feature_flag:
+        print('Extracting features from training images...')
+        real_features = knn.feature_extractor(real_tensor, model, device)
+        torch.save(real_features, feature_pth)
+    print('Extracting features from generated images...')
+    generated_features = knn.feature_extractor(generated_tensor, model, device)
+
+    if sample == 'all':
+        sample_size = len(generated_names)
+    else:
+        sample_size = int(sample)
+
+    print('Finding kNNs...')
+    knn.kNN(output_path,
+            real_names, generated_names, 
+            real_features, generated_features, 
+            path_to_training_images, path_to_generated_images, 
+            k=k_kNN, 
+            sample=sample_size, 
+            size=size,
+            name_appendix=name_appendix)
+    print('Finish!')
\ No newline at end of file
diff --git a/evaluation/evaluation_readme.md b/evaluation/evaluation_readme.md
index 4b0c24e6842143706ba3633c0adf5214ed12224f..8ee876a12c42b7ae1c4c3837026caaccf2e5593e 100644
--- a/evaluation/evaluation_readme.md
+++ b/evaluation/evaluation_readme.md
@@ -14,6 +14,7 @@ These evaluations include -
 2. Inception score 
 3. Clean FID score (with CLIP) 
 4. FID infinity and IS infinity scores 
+These metrics are computed for the class-conditional diffusion model, since its generated samples have no ground-truth counterpart.
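+
+As a rough, hedged illustration (the paths are placeholders, not the calls used by this repository), the Clean FID score \
+from the library linked below can be computed along these lines:
+
+```python
+# minimal sketch using the clean-fid package (https://github.com/GaParmar/clean-fid)
+from cleanfid import fid
+
+real_dir = "data/afhq/train"     # hypothetical path to real images
+gen_dir = "samples/epoch_500"    # hypothetical path to generated samples
+
+score = fid.compute_fid(real_dir, gen_dir)   # Clean FID between two image folders
+print(f"Clean FID: {score:.2f}")
+```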
 
 Content invariant metrics are useful when the model output can be compared w.r.t a ground truth. \
 For example, our model can output the reconstructed version of an input training image (following the entire forward \
@@ -21,6 +22,7 @@ and reverse trajectories). \
 These evaluations include -
 1. SSIM (Structural Similarity Index Metric)
 2. PSNR 
+These metrics are computed for the inpainting model, where each inpainted output is compared against its ground-truth sample.
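+
+For intuition, a minimal sketch of how PSNR and SSIM can be computed for a single image pair is shown below; the scores \
+reported by this repository come from ```compute_ssim_psnr_scores``` (imported from ```evaluation.helpers.metrics```), \
+which may differ in detail.
+
+```python
+# minimal sketch, assuming two 8-bit RGB images as numpy arrays of identical shape
+import numpy as np
+from skimage.metrics import peak_signal_noise_ratio, structural_similarity
+
+def psnr_ssim_pair(ground_truth: np.ndarray, reconstruction: np.ndarray):
+    psnr = peak_signal_noise_ratio(ground_truth, reconstruction, data_range=255)
+    ssim = structural_similarity(ground_truth, reconstruction, data_range=255, channel_axis=-1)
+    return psnr, ssim
+```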
 
 
 ### Qualitative evaluations -
@@ -35,15 +37,19 @@ extracted by using a pretrained model (ResNet50-Places365/VGGFace or CLIP). Base
 1. kNN - plot the k nearest neighbors of the generated samples 
 2. Closest pairs - plot the top pairs with smallest MSE value 
 
+For the inpainting model, only closest pairs are computed, in order to find the inpainted outputs that look most \
+similar to the raw image before the mask was applied.
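+
+The underlying idea, sketched here with placeholder tensors (the actual implementation lives in ```evaluation.helpers.kNN```): \
+real and generated images are embedded with the chosen feature extractor, and neighbours or closest pairs are read off \
+a pairwise distance matrix.
+
+```python
+# minimal sketch, assuming real_features (N, D) and generated_features (M, D) already come
+# from the chosen feature extractor (CLIP or ResNet50); the repo's own code may use MSE instead
+import torch
+
+dists = torch.cdist(generated_features, real_features)      # (M, N) pairwise Euclidean distances
+
+# kNN: indices of the 3 nearest real images for every generated sample
+knn_idx = dists.topk(k=3, dim=1, largest=False).indices
+
+# closest pairs: the 10 generated/real index pairs with the smallest distance overall
+flat_idx = dists.flatten().topk(k=10, largest=False).indices
+gen_idx, real_idx = flat_idx // dists.shape[1], flat_idx % dists.shape[1]
+```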
 
 ### Arguments - 
 
-Execution starts with evaluate_full.py file. Input arguments are - 
+Execution starts with the evaluate.py file.
+
+For the Class Conditional Diffusion Model, the function ```cdm_evaluator_afhq_class``` is called with the following arguments:
 
 * <pre>-rp, --realpath : Path to real images (string) </pre>
 * <pre>-gp, --genpath  : Path to generated images (string) </pre>
 * <pre>--size          : Resolution of images the model was trained on, default 128 (int) </pre>                  
-* <pre>-a, --arch      : Choose between 'cnn' and 'clip'. Chosen pretrained model is used to extract features from the images.
+* <pre>-a, --arch      : The pretrained model used to extract features from the images.
                          Default = 'clip' (string) 
                          **!!! Currently no CNN models are supported**</pre>
 * <pre>-m, --mode      : Choose between 'kNN' and 'pairs' (for closest pairs) or both, default = 'both' (string) </pre>
@@ -54,11 +60,28 @@ Execution starts with evaluate_full.py file. Input arguments are -
 * <pre>-n, --name      : Name appendix (string) </pre>
 * <pre>--fid           : Choose between 'yes' and 'no'. Compute FID, Inception score and their variants. Default 'no' (string)   </pre>
 
+For the Inpainting Model, the function ```cdm_evaluator_lhq_paint``` is called with the following arguments (an example call for both evaluators is shown after the list):
+
+* <pre>-rp, --realpath : Path to real images (string) </pre>
+* <pre>-gp, --genpath  : Path to generated images (string) </pre>
+* <pre>--size          : Resolution of images the model was trained on, default 128 (int) </pre>                  
+* <pre>-a, --arch      : Choose between 'clip' and 'cnn'. The chosen pretrained model is used to extract features from the images.
+                         Default = 'clip' (string) </pre>
+* <pre>-m, --mode      : Default = 'pairs' (string) </pre>
+* <pre>-k, --k         : k value for kNN, default = 3 (int) </pre>
+* <pre>-s, --sample    : Choose between an int and 'all'. If mode is 'kNN', plot kNN for this many samples (first s samples 
+                         in the directory of generated images). If mode is 'pairs', plot the top s closest pairs from entire 
+                         directory of generated images. Default 10 (int or 'all') </pre>
+* <pre>-n, --name      : Name appendix (string) </pre>
+* <pre>--fid           : Choose between 'yes' and 'no'. Compute FID, Inception score and their variants. Default 'no' (string)   </pre>
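+
+As a hedged usage sketch (paths and experiment names are placeholders; the keyword names follow the function signatures \
+in ```evaluation/evaluate.py```), the two evaluators can be invoked directly like this:
+
+```python
+from evaluation.evaluate import cdm_evaluator_afhq_class, cdm_evaluator_lhq_paint
+
+# class-conditional model: kNN / closest pairs plus FID, IS and variants
+cdm_evaluator_afhq_class(experiment_path='experiments/afhq_run/',
+                         realpath='data/afhq', genpath='samples/afhq',
+                         size=128, arch='clip', mode='both', k=3, sample=10,
+                         name_appendix='afhq_eval', fid='yes')
+
+# inpainting model: closest pairs plus PSNR and SSIM
+cdm_evaluator_lhq_paint(experiment_path='experiments/lhq_run/',
+                        realpath='data/lhq_raw', genpath='samples/lhq_inpaint',
+                        size=128, arch='clip', mode='pairs', k=3, sample=10,
+                        name_appendix='lhq_eval', fid='yes')
+```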
 
 Path to real images leads to a directory with two sub-directories - train and test.
 
 <pre>
 data 
+|_ lhq 
+|    |_ train 
+|    |_ test 
 |_ afhq 
 |    |_ train 
 |           |_ cat
@@ -70,10 +93,11 @@ data
 |           |_ wild
 </pre>
 
-CLIP features of training images are saved after the first execution. This alleviates the need to recompute \
+CLIP (or CNN) features of training images are saved after the first execution. This alleviates the need to recompute \
 features of real images for different sets of generated samples.
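+
+The caching follows a simple check-compute-save pattern, sketched here under the assumption that ```knn```, ```model```, \
+```real_tensor``` and ```device``` are set up as in ```evaluation/evaluate.py```:
+
+```python
+import os, torch
+
+feature_pth = 'features/lhq/clip_features/real_image_features.pt'
+if os.path.isfile(feature_pth):
+    real_features = torch.load(feature_pth, map_location='cpu').to(device)   # reuse cached features
+else:
+    real_features = knn.feature_extractor(real_tensor, model, device)        # compute once ...
+    torch.save(real_features, feature_pth)                                   # ... and cache for later runs
+```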
 
 
 ### Links
-3. Clean FID - https://github.com/GaParmar/clean-fid/tree/main
-4. FID infinity, IS infinity - https://github.com/mchong6/FID_IS_infinity/tree/master
\ No newline at end of file
+1. ResNet50 pretrained on Places365 - https://github.com/CSAILVision/places365
+2. Clean FID - https://github.com/GaParmar/clean-fid/tree/main
+3. FID infinity, IS infinity - https://github.com/mchong6/FID_IS_infinity/tree/master
\ No newline at end of file
diff --git a/experiment_creator.ipynb b/experiment_creator.ipynb
index bb4e02dc6ef378162e62efd8c0d1cfaf728d208a..3cb6592ee27568eacc7b1ac8d766ef5d9ef411ee 100644
--- a/experiment_creator.ipynb
+++ b/experiment_creator.ipynb
@@ -2,7 +2,7 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": 14,
+   "execution_count": 9,
    "metadata": {
     "scrolled": true
    },
@@ -10,8 +10,8 @@
    "source": [
     "from trainer.train import *\n",
     "from dataloader.load import  *\n",
-    "from models.Framework import *\n",
-    "from models.all_unets import *\n",
+    "from models.ConditionalDiffusionModel import *\n",
+    "from models.conditional_unet import *\n",
     "import torch \n",
     "from torch import nn "
    ]
@@ -33,7 +33,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 17,
+   "execution_count": 5,
    "metadata": {},
    "outputs": [],
    "source": [
@@ -44,12 +44,13 @@
     "####\n",
     "\n",
     "# Dataset path\n",
-    "datapath = \"/work/lect0100/lhq_256\"\n",
+    "datapath = \"/home/wn455752/repo/evaluation/data/afhq\"\n",
     "\n",
     "# Experiment setup\n",
-    "run_name = 'main_test1' # WANDB and experiment folder Name!\n",
-    "checkpoint = None #'model_epoch_8.pth' # Name of checkpoint pth file or None \n",
-    "experiment_path = \"/work/lect0100/main_experiment/\" + run_name +'/'\n",
+    "run_name = 'afhq_eval_ep_499_no_cfg'            # WANDB and experiment folder Name!\n",
+    "checkpoint = None #'model_epoch_8.pth'          # Name of checkpoint pth file or None \n",
+    "\n",
+    "experiment_path = '/home/wn455752/repo/conditional-diffusion/experiments/' + run_name + '/'\n",
     "\n",
     "# Path to save generated experiment folder on local machine\n",
     "local_path =\"experiments/\" + run_name + '/settings'\n",
@@ -76,7 +77,31 @@
     "sample_all=False\n",
     "\n",
     "# Evaluating\n",
-    "...\n",
+    "# class conditional\n",
+    "eval_realpath = '/home/wn455752/repo/evaluation/data/afhq'              # path to real images (assumes the dir has two subdirs - train and test)\n",
+    "eval_genpath = '/home/wn455752/repo/evaluation/samples/afhq_samples/samples/epoch_499_no_cfg/epoch_500/sample_1'        # path to sampled images\n",
+    "eval_size=image_size        # resolution of training images\n",
+    "eval_arch='clip'            # DO NOT CHANGE\n",
+    "eval_mode='both'            # choose between 'kNN' and 'pairs' (for closest pairs) or 'both'\n",
+    "eval_k_kNN=3                # choose k for kNN\n",
+    "eval_sample=10              # in case of kNN, find kNN of first 'sample' number of generated samples\n",
+    "                            # in case of pairs, find top 'sample' number of closest pairs of \n",
+    "                            # real-generated images from the entire set of generated samples\n",
+    "eval_fid='yes'              # whether to compute FID, IS scores (for class conditional)\n",
+    "eval_name_appendix='afhq_eval_ep_499_no_cfg'    # name appendix\n",
+    "\n",
+    "# inpainting\n",
+    "#eval_realpath = '/home/wn455752/repo/evaluation/samples/inpainting_samples/paint_lhq/samples/epoch_190/sample_12/raw/'       # path to real images (assumes the dir has two subdirs - train and test)\n",
+    "#eval_genpath = '/home/wn455752/repo/evaluation/samples/inpainting_samples/paint_lhq/samples/epoch_190/sample_12/inpaint/'        # path to sampled images\n",
+    "#eval_size=image_size        # resolution of training images\n",
+    "#eval_arch='clip'            # choose between 'clip' or 'cnn'\n",
+    "#eval_mode='pairs'           # DO NOT CHANGE\n",
+    "#eval_k_kNN=3                # choose k for kNN\n",
+    "#eval_sample=20              # in case of kNN, find kNN of first 'sample' number of generated samples\n",
+    "                            # in case of pairs, find top 'sample' number of closest pairs of \n",
+    "                            # real-generated images from the entire set of generated samples\n",
+    "#eval_fid='yes'              # whether to compute PSNR & SSIM\n",
+    "#eval_name_appendix='inpaint_eval_ep_190'    # name appendix\n",
     "\n",
     "\n",
     "\n",
@@ -84,12 +109,12 @@
     "# Advanced Settings Dictionaries\n",
     "###\n",
     "\n",
-    "meta_setting = dict(modelname = \"UNet_Res\",\n",
-    "                    dataset = \"UnconditionalDataset\",\n",
-    "                    framework = \"DDPM\",\n",
-    "                    trainloop_function = \"ddpm_trainer\",\n",
-    "                    sampling_function = 'ddpm_sampler',\n",
-    "                    evaluation_function = 'ddpm_evaluator',\n",
+    "meta_setting = dict(modelname = \"Conditional_UNet_Res\",                     \n",
+    "                    dataset = \"ConditionalDataset_AFHQ_Class\",              # ConditionalDataset_AFHQ_Class or ConditionalDataset_LHQ_Paint\n",
+    "                    framework = \"CDM\",\n",
+    "                    trainloop_function = \"cdm_trainer\",                     \n",
+    "                    sampling_function = 'cdm_sampler_afhq_class',           # cdm_sampler_afhq_class or cdm_sampler_lhq_paint\n",
+    "                    evaluation_function = 'cdm_evaluator_afhq_class',       # cdm_evaluator_afhq_class or cdm_evaluator_lhq_paint\n",
     "                    batchsize = batchsize\n",
     "                    )\n",
     "dataset_setting = dict(fpath = datapath,\n",
@@ -105,24 +130,7 @@
     "                      time_dim=256,\n",
     "                      attention = True,\n",
     "                    )\n",
-    "\"\"\"\n",
-    "outdated\n",
-    "model_setting = dict( channels_in=channels,                 \n",
-    "               channels_out =channels ,                \n",
-    "               activation='relu',           # activation function. Options: {'relu', 'leakyrelu', 'selu', 'gelu', 'silu'/'swish'}\n",
-    "               weight_init='he',            # weight initialization. Options: {'he', 'torch'}\n",
-    "               projection_features=64,      # number of image features after first convolution layer\n",
-    "               time_dim=batchsize,                 #dont chnage!!!\n",
-    "               time_channels=diffusion_steps,           # number of time channels #TODO same as diffusion steps? \n",
-    "               num_stages=4,                # number of stages in contracting/expansive path\n",
-    "               stage_list=None,             # specify number of features produced by stages\n",
-    "               num_blocks=1,                # number of ConvResBlock in each contracting/expansive path\n",
-    "               num_groupnorm_groups=32,     # number of groups used in Group Normalization inside a ConvResBlock\n",
-    "               dropout=0.1,                 # drop-out to be applied inside a ConvResBlock\n",
-    "               attention_list=None,         # specify MHA pattern across stages\n",
-    "               num_attention_heads=1,\n",
-    "               )\n",
-    "\"\"\"\n",
+    "\n",
     "framework_setting = dict(\n",
     "                 diffusion_steps = diffusion_steps,  # dont change!!\n",
     "                 out_shape = (channels,image_size,image_size),  # dont change!!\n",
@@ -156,16 +164,23 @@
     "                intermediate = intermediate,\n",
     "                sample_all = sample_all\n",
     "                )\n",
-    "# TODO\n",
     "evaluation_setting = dict(\n",
-    "                    checkpoint = checkpoint,\n",
-    "                    experiment_path = experiment_path,\n",
+    "                    experiment_path=experiment_path,\n",
+    "                    realpath=eval_realpath, \n",
+    "                    genpath=eval_genpath, \n",
+    "                    size=eval_size,\n",
+    "                    arch=eval_arch, \n",
+    "                    mode=eval_mode, \n",
+    "                    k=eval_k_kNN, \n",
+    "                    sample=eval_sample,\n",
+    "                    name_appendix=eval_name_appendix, \n",
+    "                    fid=eval_fid\n",
     "                    )                  "
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": 18,
+   "execution_count": 6,
    "metadata": {},
    "outputs": [
     {
@@ -175,13 +190,13 @@
       "create folder\n",
       "folder created \n",
       "stored json files in folder\n",
-      "{'modelname': 'UNet_Res', 'dataset': 'UnconditionalDataset', 'framework': 'DDPM', 'trainloop_function': 'ddpm_trainer', 'sampling_function': 'ddpm_sampler', 'evaluation_function': 'ddpm_evaluator', 'batchsize': 32}\n",
-      "{'fpath': '/work/lect0100/lhq_256', 'img_size': 128, 'frac': 0.8, 'skip_first_n': 0, 'ext': '.png', 'transform': True}\n",
+      "{'modelname': 'UNet_Res', 'dataset': 'UnconditionalDataset', 'framework': 'DDPM', 'trainloop_function': 'ddpm_trainer', 'sampling_function': 'ddpm_sampler', 'evaluation_function': 'cdm_evaluator', 'batchsize': 32}\n",
+      "{'fpath': '/home/wn455752/repo/evaluation/data/afhq', 'img_size': 128, 'frac': 0.8, 'skip_first_n': 0, 'ext': '.png', 'transform': True}\n",
       "{'n_channels': 64, 'fctr': [1, 2, 4, 4, 8], 'time_dim': 256, 'attention': True}\n",
       "{'diffusion_steps': 1000, 'out_shape': (3, 128, 128), 'noise_schedule': 'linear', 'beta_1': 0.0001, 'beta_T': 0.02, 'alpha_bar_lower_bound': 0.9, 'var_schedule': 'same', 'kl_loss': 'simplified', 'recon_loss': 'none'}\n",
-      "{'epochs': 100, 'store_iter': 10, 'eval_iter': 500, 'optimizer_class': 'torch.optim.AdamW', 'optimizer_params': None, 'learning_rate': 0.0001, 'run_name': 'main_test1', 'checkpoint': None, 'experiment_path': '/work/lect0100/main_experiment/main_test1/', 'verbose': False, 'T_max': 225000.0, 'eta_min': 1e-10}\n",
-      "{'checkpoint': None, 'experiment_path': '/work/lect0100/main_experiment/main_test1/', 'batch_size': 20, 'intermediate': False}\n",
-      "{'checkpoint': None, 'experiment_path': '/work/lect0100/main_experiment/main_test1/'}\n"
+      "{'epochs': 100, 'store_iter': 10, 'eval_iter': 500, 'optimizer_class': 'torch.optim.AdamW', 'optimizer_params': None, 'learning_rate': 0.0001, 'run_name': 'afhq_eval_ep_499_no_cfg', 'checkpoint': None, 'experiment_path': '/Users/roy/Desktop/Workspace/RWTH/SoSe 2023/Deep Learning Lab/DLL_vsc/conditional-diffusion/experiments/afhq_eval_ep_499_no_cfg/', 'verbose': False, 'T_max': 225000.0, 'eta_min': 1e-10}\n",
+      "{'checkpoint': None, 'experiment_path': '/Users/roy/Desktop/Workspace/RWTH/SoSe 2023/Deep Learning Lab/DLL_vsc/conditional-diffusion/experiments/afhq_eval_ep_499_no_cfg/', 'batch_size': 20, 'intermediate': False, 'sample_all': False}\n",
+      "{'experiment_path': '/Users/roy/Desktop/Workspace/RWTH/SoSe 2023/Deep Learning Lab/DLL_vsc/conditional-diffusion/experiments/afhq_eval_ep_499_no_cfg/', 'realpath': '/home/wn455752/repo/evaluation/data/afhq', 'genpath': '/home/wn455752/repo/evaluation/samples/afhq_samples/samples/epoch_499_no_cfg/epoch_500/sample_1', 'size': 128, 'arch': 'clip', 'mode': 'both', 'k': 3, 'sample': 10, 'name_appendix': 'afhq_eval_ep_499_no_cfg', 'fid': 'yes'}\n"
      ]
     }
    ],
@@ -239,9 +254,9 @@
  ],
  "metadata": {
   "kernelspec": {
-   "display_name": "env",
+   "display_name": ".venv",
    "language": "python",
-   "name": "env"
+   "name": "python3"
   },
   "language_info": {
    "codemirror_mode": {
@@ -253,7 +268,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.10.6"
+   "version": "3.9.12"
   }
  },
  "nbformat": 4,