diff --git a/classes/droplet/.gitkeep b/classes/droplet/.gitkeep
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/classes/droplet/proces_manual_droplet.py b/classes/droplet/proces_manual_droplet.py
deleted file mode 100644
index ef177227491c97c949e982661f8d44066478eaa5..0000000000000000000000000000000000000000
--- a/classes/droplet/proces_manual_droplet.py
+++ /dev/null
@@ -1,335 +0,0 @@
-"""
-MRCNN Particle Detection
-Determine droplet diameters from manual processing.
-
-The source code of "MRCNN Particle Detection" (https://git.rwth-aachen.de/avt-fvt/private/mrcnn-particle-detection) 
-is based on the source code of "Mask R-CNN" (https://github.com/matterport/Mask_RCNN).
-
-The source code of "Mask R-CNN" is licensed under the MIT License (MIT).
-Copyright (c) 2017 Matterport, Inc.
-Written by Waleed Abdulla
-
-All source code modifications to the source code of "Mask R-CNN" in "MRCNN Particle Detection" 
-are licensed under the Eclipse Public License v2.0 (EPL 2.0).
-Copyright (c) 2022-2023 Fluid Process Engineering (AVT.FVT), RWTH Aachen University
-Edited by Stepan Sibirtsev, Mathias Neufang & Jakob Seiler
-
-The copyrights and license terms are given in LICENSE.
-
-Ideas and small code snippets were adapted from this source:
-https://github.com/mat02/Mask_RCNN
-"""  
-
-### ----------------------------------- ###
-### Necessary Parameters and Data Names ###
-### ----------------------------------- ###
-
-# is the script executed on the cluster, e.g., RWTH High Performance Computing cluster? True = yes, False = no
-cluster = False
-
-### please specify only for non-cluster evaluations 
-
-# generate detection masks? True = yes, False = no
-masks = False
-# input dataset path to find in "...\datasets\input\..."
-dataset_path = r"test"
-# path to save the output images "...\datasets\output\..."
-save_path = r"test" 
-# name of the excel results file to find in "...\datasets\output\..."
-name_result_file = "test" 
-# save every n-th result image
-save_nth_image = 1     
-# pixel size in [µm/px]; to read it from the Sopat log file, set pixelsize = 0
-pixelsize = 1  
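-# example: with pixelsize = 1 µm/px, a droplet spanning 250 px corresponds to
-# 250 * 1 / 1000 = 0.25 mm; this is the conversion applied to every diameter below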
-
-### specifications for filters
-
-# detect/mark oval droplets? True = yes, False = no
-detect_oval_droplets = True
-# minimum aspect ratio: filter for elliptical shapes            
-min_aspect_ratio = 0.9   
-# edge threshold: filter for image border intersecting droplets
-edge_threshold = 0.01
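-# example: for a 2000 px wide image, edge_threshold = 0.01 flags every bounding
-# box that reaches within 20 px of the image border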
-
-### ----------------------------------- ###
-###             Initialization          ###
-### ----------------------------------- ###
-
-import os
-import sys
-import itertools
-import math
-import logging
-import json
-import re
-import random
-import cv2
-import pandas as pd
-from collections import OrderedDict
-import numpy as np
-import skimage.draw
-import matplotlib
-import matplotlib.pyplot as plt
-import matplotlib.patches as patches
-import matplotlib.lines as lines
-from matplotlib.patches import Polygon
-from pathlib import Path
-
-
-# Root directory of the project
-if cluster is False:
-    ROOT_DIR = os.path.abspath("")
-    DATASET_DIR = os.path.join(ROOT_DIR, "datasets\\input", dataset_path)
-    SAVE_DIR = os.path.join(ROOT_DIR, "datasets\\output", save_path)
-    EXCEL_DIR = os.path.join(SAVE_DIR, name_result_file + '.xlsx')
-    MASKS = masks
-    PIXELSIZE = pixelsize
-    EDGE_THRESHOLD = edge_threshold
-    MIN_ASPECT_RATIO = min_aspect_ratio
-    DETECT_OVAL_DROPLETS = detect_oval_droplets
-    SAVE_NTH_IMAGE = save_nth_image
-else:
-    import argparse
-    # Parse command line arguments
-    parser = argparse.ArgumentParser(
-        description='evaluation on cluster')
-    parser.add_argument('--dataset_path', required=True,
-                        help='Dataset path to find in Mask_R_CNN\datasets\input')
-    parser.add_argument('--save_path', required=True,
-                        help='Save path to find in Mask_R_CNN\datasets\output')
-    parser.add_argument('--name_result_file', required=True,
-                        help='Name of the excel result file to find in Mask_R_CNN\datasets\output')
-    parser.add_argument('--detect_oval_droplets', required=False,
-                        default=True,
-                        help="")
-    parser.add_argument('--pixelsize', required=False,
-                        default=1,
-                        help="")
-    parser.add_argument('--save_nth_image', required=False,
-                        default=1,
-                        help="")                        
-    parser.add_argument('--min_aspect_ratio', required=False,
-                        default=0.9,
-                        help="")
-    parser.add_argument('--edge_threshold', required=False,
-                        default=0.01,
-                        help="")
-    parser.add_argument('--masks', required=False,
-                        default=False,
-                        help='Generate detection masks?')
-    args = parser.parse_args()
-    ROOT_DIR = os.path.join("/rwthfs/rz/cluster", os.path.abspath("../.."))
-    DATASET_DIR = os.path.join(ROOT_DIR, "datasets/input", args.dataset_path)
-    SAVE_DIR = os.path.join(ROOT_DIR, "datasets/output", args.save_path)
-    EXCEL_DIR = os.path.join(SAVE_DIR, args.name_result_file + '.xlsx')
-    # argparse delivers strings; compare explicitly instead of calling bool(),
-    # since bool("False") evaluates to True, and parse the fractional
-    # thresholds as floats (int("0.01") would raise a ValueError)
-    MASKS = str(args.masks) == "True"
-    PIXELSIZE = float(args.pixelsize)
-    EDGE_THRESHOLD = float(args.edge_threshold)
-    MIN_ASPECT_RATIO = float(args.min_aspect_ratio)
-    DETECT_OVAL_DROPLETS = str(args.detect_oval_droplets) == "True"
-    SAVE_NTH_IMAGE = int(args.save_nth_image)
-
-# read pixelsize from JSON-File (if input data is from a Sopat measurement)
-if PIXELSIZE == 0:
-    sopat_name = (DATASET_DIR + '/' + 'Sopat_Log.json')
-    sopat_name_new = (DATASET_DIR + '/Sopat_Log_New.json')
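-    # the log is rewritten without line 30 before parsing; presumably that line
-    # contains an entry that breaks strict JSON parsing (assumption)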
-
-    with open(sopat_name, "r",encoding="utf-8") as sopat_content:
-        content_lines = sopat_content.readlines()
-
-    current_line = 1
-    with open(sopat_name_new, "w",encoding="utf-8") as sopat_content_new:
-        for line in content_lines:
-            if current_line == 30:
-                pass
-            else:
-                sopat_content_new.write(line)
-            current_line += 1
-    sopat_data = json.load(open(sopat_name_new, "r", encoding="utf-8"))
-    PIXELSIZE = sopat_data["sopatCamControlAcquisitionLog"]["conversionMicronsPerPx"]
-
-# Import Mask RCNN
-sys.path.append(ROOT_DIR)  # To find local version of the library
-Path(SAVE_DIR).mkdir(parents=True, exist_ok=True)
-from mrcnn import utils
-from mrcnn import visualize
-from mrcnn.visualize import display_images
-import mrcnn.model as modellib
-from mrcnn.model import log
-from mrcnn.config import Config
-
-class DropletConfig(Config):
-    """Configuration for training on the toy  dataset.
-    Derives from the base Config class and overrides some values.
-    """
-    # Give the configuration a recognizable name
-    NAME = "droplet"
-
-    # NUMBER OF GPUs to use. When using only a CPU, this needs to be set to 1.
-    GPU_COUNT = 1
-
-    # Generate detection masks
-    #     False: Output only bounding boxes like in Faster-RCNN
-    #     True: Generate masks as in Mask-RCNN
-    GENERATE_MASKS = MASKS
-
-# Configurations
-config = DropletConfig()
-
-class DropletDataset(utils.Dataset):
-
-    def load_droplet(self, dataset_dir, subset):
-        """Load a subset of the Droplet dataset.
-        dataset_dir: Root directory of the dataset.
-        subset: Subset to load: train or val
-        """
-        # Add classes. We have only one class to add.
-        self.add_class("droplet", 1, "droplet")
-
-        # We mostly care about the x and y coordinates of each region
-        # Note: In VIA 2.0, regions was changed from a dict to a list.
-        annotations = json.load(open(os.path.join(dataset_dir, "test.json")))
-        annotations = list(annotations.values())  # don't need the dict keys
-        # The VIA tool saves images in the JSON even if they don't have any
-        # annotations. Skip unannotated images.
-        annotations = [a for a in annotations if a['regions']]
-        # Add images
-        for a in annotations:
-            # Get the x, y coordinates of points of the polygons that make up
-            # the outline of each object instance. These are stored in the
-            # shape_attributes (see json format above)
-            # The if condition is needed to support VIA versions 1.x and 2.x.
-            if type(a['regions']) is dict:
-                polygons = [r['shape_attributes']
-                            for r in a['regions'].values()]
-            else:
-                polygons = [r['shape_attributes'] for r in a['regions']]
-
-            # load_mask() needs the image size to convert polygons to masks.
-            # Unfortunately, VIA doesn't include it in JSON, so we must read
-            # the image. This is only manageable since the dataset is tiny.
-            image_path = os.path.join(dataset_dir, a['filename'])
-            image = skimage.io.imread(image_path)
-            height, width = image.shape[:2]
-
-            self.add_image(
-                "droplet",
-                image_id=a['filename'],  # use file name as a unique image id
-                path=image_path,
-                width=width, height=height,
-                polygons=polygons)
-
-    def load_mask(self, image_id):
-        """Generate instance masks for an image.
-       Returns:
-        masks: A bool array of shape [height, width, instance count] with
-            one mask per instance.
-        class_ids: a 1D array of class IDs of the instance masks.
-        """
-        # If not a droplet dataset image, delegate to parent class.
-        image_info = self.image_info[image_id]
-        if image_info["source"] != "droplet":
-            return super(self.__class__, self).load_mask(image_id)
-
-        # Convert polygons to a bitmap mask of shape [height, width, instance_count]
-        info = self.image_info[image_id]
-        mask = np.zeros([info["height"], info["width"], len(info["polygons"])],
-                        dtype=np.uint8)
-
-        for i, p in enumerate(info["polygons"]):
-            # Get indexes of pixels inside the polygon and set them to 1
-            if p['name'] == 'polygon':
-                rr, cc = skimage.draw.polygon(
-                    p['all_points_y'], p['all_points_x'])
-
-            elif p['name'] == 'ellipse':
-                rr, cc = skimage.draw.ellipse(
-                    p['cy'], p['cx'], p['ry'], p['rx'])
-            else:
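-                # note: skimage.draw.circle was removed in scikit-image 0.19;
-                # on newer versions use skimage.draw.disk((p['cy'], p['cx']), p['r'])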
-                rr, cc = skimage.draw.circle(p['cy'], p['cx'], p['r'])
-            # keep only pixels that fall strictly inside the image bounds
-            coords = np.array((rr, cc)).T
-            inside = ((coords[:, 0] > 0) & (coords[:, 0] < info["height"]) &
-                      (coords[:, 1] > 0) & (coords[:, 1] < info["width"]))
-            coords = coords[inside]
-            rr = coords[:, 0]
-            cc = coords[:, 1]
-
-            if len(rr) == 0 or len(cc) == 0:
-                continue
-            mask[rr, cc, i] = 1
-
-        # Return mask, and array of class IDs of each instance. Since we have
-        # one class ID only, we return an array of 1s
-        # np.bool was removed in NumPy 1.24; the built-in bool works in all versions
-        return mask.astype(bool), np.ones([mask.shape[-1]], dtype=np.int32)
-
-    def image_reference(self, image_id):
-        """Return the path of the image."""
-        info = self.image_info[image_id]
-        if info["source"] == "droplet":
-            return info["path"]
-        else:
-            return super(self.__class__, self).image_reference(image_id)
-
-dataset = DropletDataset()
-dataset.load_droplet(DATASET_DIR, None)
-dataset.prepare()
-
-def get_ax(rows=1, cols=1, size=8):
-    """Return a Matplotlib Axes array to be used in
-    all visualizations in the notebook. Provide a
-    central point to control graph sizes.
-    Adjust the size attribute to control how big to render images"""
-    _, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))
-    return ax
-
-# Load and display samples
-mean_diameter_total = []
-for image_id in dataset.image_ids:
-    image_width = dataset.image_info[image_id]['width']
-    image_height = dataset.image_info[image_id]['height']
-    image_name = dataset.image_info[image_id]['id']
-    image = dataset.load_image(image_id)
-    mask, class_ids = dataset.load_mask(image_id)
-    bbox = utils.extract_bboxes(mask)
-    colors = []
-    diameter_vis_list = []
-    for i in range(len(bbox)):
-        bbox_delta = (abs(bbox[i][1] - bbox[i][3]), abs(bbox[i][0] - bbox[i][2]))
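-        # equivalent-sphere diameter from the bbox: bbox_delta = (width, height),
-        # and the droplet is treated as a spheroid rotationally symmetric about
-        # the vertical axis, so d = (height * width^2)^(1/3); the +1 converts
-        # index spans to pixel counts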
-        diameter = ((bbox_delta[1]+1)*(bbox_delta[0]+1)**2)**(1/3)
-        #diameter = abs((bbox_delta[0] + bbox_delta[1] + 2) / 2)
-        diameter_vis_list.append(round(diameter * PIXELSIZE / 1000,3))
-        if DETECT_OVAL_DROPLETS is True:
-            if (
-                bbox[i][0] <= image_height*EDGE_THRESHOLD or bbox[i][1] <= image_width*EDGE_THRESHOLD or 
-                bbox[i][2] >= image_height*(1-EDGE_THRESHOLD) or bbox[i][3] >= image_width*(1-EDGE_THRESHOLD)
-                ):
-                colors.append((1, 0, 0))
-            else:
-                colors.append((0, 1, 0))
-                mean_diameter_total.append(diameter)           
-        else:
-            if (
-                bbox[i][0] <= image_height*EDGE_THRESHOLD or bbox[i][1] <= image_width*EDGE_THRESHOLD or 
-                bbox[i][2] >= image_height*(1-EDGE_THRESHOLD) or bbox[i][3] >= image_width*(1-EDGE_THRESHOLD) or
-                # checks if bbox is within allowed aspect ratio
-                bbox_delta[0] / bbox_delta[1] >= 1 / MIN_ASPECT_RATIO or
-                bbox_delta[0] / bbox_delta[1] <= MIN_ASPECT_RATIO               
-                ):
-                colors.append((1, 0, 0))
-            else:
-                colors.append((0, 1, 0))
-                mean_diameter_total.append(diameter)    
-    ax = get_ax(1)
-    if config.GENERATE_MASKS is True:
-        visualize.display_instances(image, bbox, mask, class_ids, dataset.class_names, ax=ax, colors=colors, captions=diameter_vis_list,
-                                    save_dir=SAVE_DIR, img_name=image_name, save_img=True, number_saved_images=SAVE_NTH_IMAGE)
-    else:
-        visualize.display_instances(image, bbox, None, class_ids, dataset.class_names, ax=ax, show_mask=False, colors=colors, captions=diameter_vis_list,
-                                    title=None, save_dir=SAVE_DIR, img_name=image_name, save_img=True, number_saved_images=SAVE_NTH_IMAGE, counter_1=SAVE_NTH_IMAGE )
-    plt.close()
-
-### Convert Mean Diameter To Excel
-pd.DataFrame(mean_diameter_total).to_excel(EXCEL_DIR, header=False, index=False)
\ No newline at end of file
diff --git a/classes/droplet/process_automated_droplet.py b/classes/droplet/process_automated_droplet.py
deleted file mode 100644
index 9db5a4cfc97b4c83f36391232ddf81633e4391d4..0000000000000000000000000000000000000000
--- a/classes/droplet/process_automated_droplet.py
+++ /dev/null
@@ -1,714 +0,0 @@
-"""
-MRCNN Particle Detection
-Process images with MRCNN model trained on the droplet class.
-
-The source code of "MRCNN Particle Detection" (https://git.rwth-aachen.de/avt-fvt/private/mrcnn-particle-detection) 
-is based on the source code of "Mask R-CNN" (https://github.com/matterport/Mask_RCNN).
-
-The source code of "Mask R-CNN" is licensed under the MIT License (MIT).
-Copyright (c) 2017 Matterport, Inc.
-Written by Waleed Abdulla
-
-All source code modifications to the source code of "Mask R-CNN" in "MRCNN Particle Detection" 
-are licensed under the Eclipse Public License v2.0 (EPL 2.0).
-Copyright (c) 2022-2023 Fluid Process Engineering (AVT.FVT), RWTH Aachen University
-Edited by Stepan Sibirtsev, Mathias Neufang & Jakob Seiler
-
-The copyrights and license terms are given in LICENSE.
-
-Ideas and small code snippets were adapted from this source:
-https://github.com/mat02/Mask_RCNN
-"""   
-
-### ----------------------------------- ###
-### Necessary Parameters and Data Names ###
-### ----------------------------------- ###
-
-# is the script executed on the cluster, e.g., RWTH High Performance Computing cluster? True = yes, False = no
-cluster = False
-
-### please specify only for non-cluster evaluations 
-
-# generate detection masks? True = yes, False = no
-masks = False
-# is the program execution done on GPU or CPU? True = GPU, False = CPU
-device = True
-# input dataset path to find in "...\datasets\input\..."
-dataset_path = r"test"              
-# path to save the output images "...\datasets\output\..."
-save_path = r"test"                 
-# name of the excel results file to find in "...\datasets\output\..."
-name_result_file = "test"         
-# path of the MRCNN model to find in "...\models\..."
-weights_path = r"test"
-# MRCNN model name to find in "...\models\..."
-weights_name = r"test"
-# file format of images
-file_format = "jpg"
-# save every n-th result image
-save_nth_image = 1  
-# pixel size in [µm/px]; to read it from the Sopat log file, set pixelsize = 0
-pixelsize = 1
-# specify if you want the image to be center cropped before detection (x, y)
-image_crop = None #(1931, 1521)
-
-### specifications for the processing parameters
-
-# number of images to process with on each GPU. 
-# a 12GB GPU can typically handle 2 images of 1024x1024px.
-# adjust based on your GPU memory and image sizes. 
-# if only one GPU is used, this parameter is equivalent to batch size (BATCH_SIZE --> config.py).
-images_gpu = 1
-# max. image size
-# select the value the MRCNN model was trained with.
-image_max = 2048
-# skip detections with confidence < value
-confidence = 0.1
-
-### specifications for filters
-
-# detect reflections in droplets? True = yes, False = no
-detect_reflections = False
-# detect/mark oval droplets? True = yes, False = no
-detect_oval_droplets = True
-# minimum aspect ratio: filter for elliptical shapes            
-min_aspect_ratio = 0.9     
-# detect adhesive droplets? True = yes, False = no
-detect_adhesive_droplets = False
-# save coordinates of adhesive droplets detected
-save_coordinates = False
-# minimum velocity: threshold to filter adhesive droplets
-# minimum distance [% of droplet mean diameter] that a droplet has to travel between 2 frames
-min_velocity = 0.2
-# minimum size difference: threshold to filter adhesive droplets
-# [%] size difference to be considered a different droplet
-min_size_diff = 0.4
-# number of images that are compared; necessary because adhesive droplets may not be detected in every frame
-n_images_compared = 3
-# number of times a droplet has to be detected at a similar position to be classified as adhesive
-n_adhesive_high = 3
-# stricter count that applies when the droplet has barely moved (see low_distance_threshold)
-n_adhesive_low = 2
-# distance [fraction of the droplet mean diameter] below which n_adhesive_low applies
-low_distance_threshold = 0.05
-# edge threshold: filter for image border intersecting droplets
-edge_tolerance = 0.01
-
-# use contrast adjustment? 0 = no, 1 = contrast limited adaptive histogram equalization, 2 = contrast stretching
-contrast = 0
-
-### ----------------------------------- ###
-###             Initialization          ###
-### ----------------------------------- ###
-
-from PIL import Image
-import os
-import json
-import sys
-import random
-import math
-import re
-import time
-import glob
-import itertools
-import numpy as np
-import tensorflow as tf
-import matplotlib
-import matplotlib.image as mpimg
-import matplotlib.pyplot as plt
-import matplotlib.patches as patches
-import cv2
-import pandas as pd
-from numpy import asarray
-from random import random
-from skimage import exposure
-from pathlib import Path
-matplotlib.use("agg")
-
-start_time = time.time()
-
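-# compatibility shim: tf.to_float was removed in TensorFlow 2.x; it is restored
-# here, presumably because the bundled mrcnn code still calls it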
-tf.to_float = lambda x: tf.cast(x, tf.float32)
-# Root directory of the project
-if cluster is False:
-    ROOT_DIR = os.path.abspath("")
-    WEIGHTS_DIR = os.path.join(ROOT_DIR, "models", weights_path, weights_name + '.h5')
-    DATASET_DIR = os.path.join(ROOT_DIR, "datasets\\input", dataset_path)
-    SAVE_DIR = os.path.join(ROOT_DIR, "datasets\\output", save_path)
-    EXCEL_DIR = os.path.join(SAVE_DIR, name_result_file + '.xlsx')
-    IMAGE_MAX = image_max
-    MASKS = masks
-    DEVICE = device
-    IMAGES_GPU = images_gpu
-    SAVE_NTH_IMAGE = save_nth_image
-    DETECT_OVAL_DROPLETS = detect_oval_droplets
-    DETECT_REFLECTIONS = detect_reflections
-    MIN_ASPECT_RATIO = min_aspect_ratio
-    PIXELSIZE = pixelsize
-    DETECT_ADHESIVE_DROPLETS = detect_adhesive_droplets
-    SAVE_COORDINATES = save_coordinates
-    MIN_VELOCITY = min_velocity
-    MIN_SIZE_DIFF = min_size_diff
-    N_IMAGES_COMPARED = n_images_compared
-    N_ADHESIVE_HIGH = n_adhesive_high
-    N_ADHESIVE_LOW = n_adhesive_low
-    LOW_DISTANCE_THRESHOLD = low_distance_threshold
-    EDGE_TOLERANCE = edge_tolerance
-    IMAGE_CROP = image_crop
-    CONTRAST = contrast
-    CONFIDENCE = confidence
-    FILE_FORMAT = file_format
-else:
-    import argparse
-    # Parse command line arguments
-    parser = argparse.ArgumentParser(
-        description='evaluation on cluster')
-    parser.add_argument('--dataset_path', required=True,
-                        help='Dataset path to find in Mask_R_CNN\datasets\input')
-    parser.add_argument('--save_path', required=True,
-                        help='Save path to find in Mask_R_CNN\datasets\output')
-    parser.add_argument('--name_result_file', required=True,
-                        help='Name of the excel result file to find in Mask_R_CNN\datasets\output')
-    parser.add_argument('--weights_path', required=True,
-                        help='Weights path to find in Mask_R_CNN\models')
-    parser.add_argument('--weights_name', required=True,
-                        help='Choose Neuronal Network / Epoch to find in Mask_R_CNN\models')
-    parser.add_argument('--file_format', required=True,
-                        help='')
-    parser.add_argument('--masks', required=False, type=str,
-                        default="False",
-                        help='Generate detection masks?')
-    parser.add_argument('--device', required=False, type=str,
-                        default="True",
-                        help='is the evaluation done on GPU or CPU? True = GPU, False = CPU')
-    parser.add_argument('--detect_oval_droplets', required=True, type=str,
-                        default="False",
-                        help="")
-    parser.add_argument('--detect_reflections', required=True, type=str,
-                        default="False",
-                        help="")                        
-    parser.add_argument('--detect_adhesive_droplets', required=False, type=str,
-                        default="False",
-                        help="") 
-    parser.add_argument('--save_coordinates', required=False, type=str,
-                        default="False",
-                        help="")                          
-    parser.add_argument('--images_gpu', required=False, type=int,
-                        default=1,
-                        help='Number of images to train with on each GPU')
-    parser.add_argument('--image_max', required=False, type=int,
-                        default=1024,
-                        help="max. image size")
-    parser.add_argument('--save_nth_image', required=False, type=int,
-                        default=1,
-                        help="")
-    parser.add_argument('--n_images_compared', required=False, type=int,
-                        default=3,
-                        help="")
-    parser.add_argument('--n_adhesive_high', required=False, type=int,
-                        default=3,
-                        help="")                     
-    parser.add_argument('--n_adhesive_low', required=False, type=int,
-                        default=2,
-                        help="")
-    parser.add_argument('--image_crop', required=False, type=int,
-                        default=None,
-                        help="")
-    parser.add_argument('--contrast', required=False, type=int,
-                        default=0,
-                        help="")
-    parser.add_argument('--min_aspect_ratio', required=False, type=float,
-                        default=0.9,
-                        help="")
-    parser.add_argument('--pixelsize', required=False, type=float,
-                        default=1,
-                        help="")
-    parser.add_argument('--min_velocity', required=False, type=float,
-                        default=0.2,
-                        help="")
-    parser.add_argument('--min_size_diff', required=False, type=float,
-                        default=0.4,
-                        help="")
-    parser.add_argument('--low_distance_threshold', required=False, type=float,
-                        default=0.05,
-                        help="")
-    parser.add_argument('--edge_tolerance', required=False, type=float,
-                        default=0.01,
-                        help="")
-    parser.add_argument('--confidence', required=False, type=float,
-                        default=0.5,
-                        help="")                        
-
-    args = parser.parse_args()
-    ROOT_DIR = os.path.join("/rwthfs/rz/cluster", os.path.abspath("../.."))
-    WEIGHTS_DIR = os.path.join(ROOT_DIR, "models", args.weights_path, args.weights_name + '.h5')
-    DATASET_DIR = os.path.join(ROOT_DIR, "datasets/input", args.dataset_path)
-    SAVE_DIR = os.path.join(ROOT_DIR, "datasets/output", args.save_path)
-    EXCEL_DIR = os.path.join(SAVE_DIR, args.name_result_file + '.xlsx')
-    FILE_FORMAT = args.file_format
-    if args.detect_oval_droplets == "True":
-        DETECT_OVAL_DROPLETS = True
-    elif args.detect_oval_droplets == "False":
-        DETECT_OVAL_DROPLETS = False
-    if args.detect_reflections == "True":
-        DETECT_REFLECTIONS = True
-    elif args.detect_reflections == "False":
-        DETECT_REFLECTIONS = False   
-    if args.masks == "True":
-        MASKS = True
-    elif args.masks == "False":
-        MASKS = False
-    if args.device == "True":
-        DEVICE = True
-    elif args.device == "False":
-        DEVICE = False
-    if args.detect_adhesive_droplets == "True":
-        DETECT_ADHESIVE_DROPLETS = True
-    elif args.detect_adhesive_droplets == "False":
-        DETECT_ADHESIVE_DROPLETS = False
-    if args.save_coordinates == "True":
-        SAVE_COORDINATES = True
-    elif args.save_coordinates == "False":
-        SAVE_COORDINATES = False        
-
-    #
-    IMAGE_MAX = args.image_max
-    IMAGES_GPU = args.images_gpu
-    SAVE_NTH_IMAGE = args.save_nth_image
-    N_IMAGES_COMPARED = args.n_images_compared
-    N_ADHESIVE_HIGH = args.n_adhesive_high
-    N_ADHESIVE_LOW = args.n_adhesive_low
-    IMAGE_CROP = args.image_crop
-    CONTRAST = args.contrast   
-    # 
-    MIN_ASPECT_RATIO = args.min_aspect_ratio
-    PIXELSIZE = args.pixelsize
-    MIN_VELOCITY = args.min_velocity
-    MIN_SIZE_DIFF = args.min_size_diff
-    LOW_DISTANCE_THRESHOLD = args.low_distance_threshold
-    EDGE_TOLERANCE = args.edge_tolerance
-    CONFIDENCE = args.confidence
-
-# Directory to save logs and trained model
-MODEL_DIR = os.path.join(ROOT_DIR, "models")
-Path(SAVE_DIR).mkdir(parents=True, exist_ok=True)
-
-### determine the dataset mean pixel
-images_mean_pixel = []
-images_path = glob.glob(DATASET_DIR + "/*." + FILE_FORMAT)
-for img_path in images_path:
-    img = cv2.imread(img_path)
-    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
-    if CONTRAST == 1:
-        # adaptive Equalization
-        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
-        img = exposure.equalize_adapthist(img)
-        img = img.astype('float32') * 255
-    images_mean_pixel.append(img)
-# use a numpy array so that "+=" adds element-wise (list += ndarray would
-# extend the list instead and break the division below)
-color_sum = np.zeros(3, dtype='float64')
-for img2 in images_mean_pixel:
-    pixels = asarray(img2)
-    pixels = pixels.astype('float32')
-    # calculate per-channel means
-    means = pixels.mean(axis=(0, 1), dtype='float64')
-    color_sum += means
-mean_pixel = color_sum/len(images_mean_pixel)
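-# mean_pixel is the dataset-wide per-channel mean; it is assigned to
-# config.MEAN_PIXEL below so that inputs are zero-centered by subtracting it,
-# as in Matterport's Mask R-CNN image molding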
-
-# read pixelsize from JSON-File (if input data is from a Sopat measurement)
-if PIXELSIZE == 0:
-    sopat_find = [file for file in os.listdir(DATASET_DIR) if file.endswith('.json')]
-    sopat_name = (DATASET_DIR + '/' + sopat_find[0])
-    sopat_name_new = (DATASET_DIR + '/Sopat_Log.json')
-
-    with open(sopat_name, "r",encoding="utf-8") as sopat_content:
-        content_lines = sopat_content.readlines()
-
-    current_line = 1
-    with open(sopat_name_new, "w",encoding="utf-8") as sopat_content_new:
-        for line in content_lines:
-            if current_line == 30:
-                pass
-            else:
-                sopat_content_new.write(line)
-            current_line += 1
-    sopat_data = json.load(open(sopat_name_new, "r", encoding="utf-8"))
-    PIXELSIZE = sopat_data["sopatCamControlAcquisitionLog"]["conversionMicronsPerPx"]
-      
-# Import Mask RCNN
-sys.path.append(ROOT_DIR)  # To find local version of the library
-from mrcnn import utils
-from mrcnn import visualize
-from mrcnn.visualize import display_images
-import mrcnn.model as modellib
-from mrcnn.model import log
-from mrcnn.config import Config
-
-class DropletConfig(Config):
-    """Configuration for training on the toy  dataset.
-    Derives from the base Config class and overrides some values.
-    """
-    # Give the configuration a recognizable name
-    NAME = "droplet"
-
-    # NUMBER OF GPUs to use. When using only a CPU, this needs to be set to 1.
-    GPU_COUNT = 1
-
-    # Backbone network architecture
-    # Supported values are: resnet50, resnet101.
-    # You can also provide a callable that should have the signature
-    # of model.resnet_graph. If you do so, you need to supply a callable
-    # to COMPUTE_BACKBONE_SHAPE as well
-    BACKBONE = "resnet50"
-
-    # Generate detection masks
-    #     False: Output only bounding boxes like in Faster-RCNN
-    #     True: Generate masks as in Mask-RCNN
-    GENERATE_MASKS = MASKS
-
-    # We use a GPU with 12GB memory, which can fit two images.
-    # Adjust down if you use a smaller GPU.
-    if DEVICE is True:
-        IMAGES_PER_GPU = IMAGES_GPU
-    else:
-        IMAGES_PER_GPU = 1
-
-    # Number of classes (including background)
-    NUM_CLASSES = 1 + 1  # Background + droplet
-
-    # Skip detections with confidence < value
-    DETECTION_MIN_CONFIDENCE = CONFIDENCE
-
-    # Input image resizing
-    IMAGE_MAX_DIM = IMAGE_MAX
-    IMAGE_MIN_DIM = IMAGE_MAX_DIM
-
-    MEAN_PIXEL = mean_pixel
-
-### Configurations
-config = DropletConfig()
-config.display()
-
-### Notebook Preferences
-
-# Device to load the neural network on.
-# Useful if you're training a model on the same 
-# machine, in which case use CPU and leave the
-# GPU for training.
-if DEVICE is True:
-    dev = "/gpu:0"  # /cpu:0 or /gpu:0
-else:
-    dev = "/cpu:0"  # /cpu:0 or /gpu:0
-
-# Inspect the model in training or inference modes
-# values: 'inference' or 'training'
-# TODO: code for 'training' test mode not ready yet
-TEST_MODE = "inference"
-
-def get_ax(rows=1, cols=1, size=8):
-    """Return a Matplotlib Axes array to be used in
-    all visualizations in the notebook. Provide a
-    central point to control graph sizes.
-    
-    Adjust the size attribute to control how big to render images
-    """
-    _, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))
-    return ax
-
-### Load Model
-# Create model in inference mode
-
-with tf.device(dev):
-    model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
-
-# Load weights
-    print("Loading weights ", WEIGHTS_DIR)
-    model.load_weights(WEIGHTS_DIR, by_name=True)
-
-### Run Detection
-class Droplet:
-    """Class to structure Droplet information in memory and calculate values for comparison
-    """
-
-    def __init__(self, roi, mask, img):
-        self.roi = roi
-        if config.GENERATE_MASKS:
-            self.mask = mask
-        # calculate edge lengths and center of roi: roi[1]-roi[3] = box width, roi[0]-roi[2] = box height
-        self.range = (abs(roi[1] - roi[3]), abs(roi[0] - roi[2]))
-        self.center = (abs((roi[0]+roi[2])//2), abs((roi[1]+roi[3])//2))
-        # self.mean_diameter = abs((self.range[0]+self.range[1]+2)/2)
-        self.mean_diameter = ((self.range[1]+1)*(self.range[0]+1)**2)**(1/3)
-        self.mean_diameter_mm = round(self.mean_diameter * PIXELSIZE / 1000, 3)
-        self.stuck = []
-        self.check_roi()
-        self.img = img
-
-    def check_roi(self):
-        """Run at Droplet creation to check for aspect ratio and whether it touches the edge
-        """
-        global DETECT_OVAL_DROPLETS, EDGE_TOLERANCE, MIN_ASPECT_RATIO, image_height, image_width
-        if DETECT_OVAL_DROPLETS is True:
-            if(
-                # checks if bbox touches the edge
-                self.roi[0] <= image_height*EDGE_TOLERANCE or self.roi[1] <= image_width*EDGE_TOLERANCE or 
-                self.roi[2] >= image_height*(1-EDGE_TOLERANCE) or self.roi[3] >= image_width*(1-EDGE_TOLERANCE) 
-            ):
-                self.fault = 1
-            else:
-                self.fault = 0
-        else:  
-            if(
-                # checks if bbox touches the edge
-                self.roi[0] <= image_height*EDGE_TOLERANCE or self.roi[1] <= image_width*EDGE_TOLERANCE or 
-                self.roi[2] >= image_height*(1-EDGE_TOLERANCE) or self.roi[3] >= image_width*(1-EDGE_TOLERANCE) or
-                # checks if bbox is within allowed aspect ratio
-                self.range[0] / self.range[1] >= 1 / MIN_ASPECT_RATIO or 
-                self.range[0] / self.range[1] <= MIN_ASPECT_RATIO
-            ):
-                self.fault = 1
-            else:
-                self.fault = 0                
-    # parameters used to identify adhesive (stuck) droplets
-    def distance(self, center):
-        """Returns distance to given coordinates
-        """
-        offset = np.array([center[0]-self.center[0], center[1]-self.center[1]])
-        dist = np.linalg.norm(offset)
-        dist /= self.mean_diameter
-        return dist
-
-    def size_difference(self, other_range):
-        """Returns size difference in percent compared to given ranges
-        """
-        size_diff = abs(
-            1-((self.range[0]*self.range[1]) / (other_range[0]*other_range[1])))
-        return size_diff
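-    # worked example (hypothetical numbers): two detections with centers
-    # (100, 100) and (105, 102) and mean diameter ~50 px give
-    # distance = sqrt(5**2 + 2**2) / 50 ≈ 0.11 < min_velocity = 0.2, so the
-    # pair is a candidate for one adhesive droplet if size_difference also
-    # stays below min_size_diff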
-
-def visualize_result(memory,counter_1):
-    """collects all necessary parameters from the first entry in memory and then calls visualize.display_instances()
-    Also appends droplet diameter to mean_diameter_total if no fault was determined
-    """
-    # Create Lists to pass onto display_instances()
-    global stuck_droplet_data
-    ax = get_ax(size=8)
-    rois, masks, colors, diameter_vis_list = [], [], [], []
-    xl = False
-    if memory[0][0]:
-        print(f"Visualizing {memory[0][0][0].img}")
-    for droplet in memory[0][0]:
-        rois.append(droplet.roi)
-        diameter_vis_list.append(droplet.mean_diameter_mm)
-        if config.GENERATE_MASKS:
-            masks.append(droplet.mask)
-        if droplet.fault == 0:
-            mean_diameter_total.append(droplet.mean_diameter)
-            colors.append((0, 1, 0))
-        elif droplet.fault == 1:
-            colors.append((1, 0, 0))
-        elif droplet.fault == 4:
-            colors.append((0, 0, 1))            
-        elif DETECT_ADHESIVE_DROPLETS is False:
-            mean_diameter_total.append(droplet.mean_diameter)
-            colors.append((0, 1, 0))
-        elif droplet.fault == 2:
-            if len(droplet.stuck) >= N_ADHESIVE_HIGH:
-                colors.append((1, 0.65, 0))
-                if not xl:
-                    stuck_droplet_data.append(
-                        [droplet.img, droplet.mean_diameter_mm, "", droplet.stuck[0][0], droplet.stuck[0][1],
-                        droplet.stuck[0][2], droplet.stuck[0][3], droplet.stuck[0][4]])
-                    xl = True
-                else:
-                    stuck_droplet_data.append(
-                        ["", droplet.mean_diameter_mm, "", droplet.stuck[0][0], droplet.stuck[0][1],
-                         droplet.stuck[0][2], droplet.stuck[0][3], droplet.stuck[0][4]])
-                
-                for data in droplet.stuck[1:]:
-                    stuck_droplet_data.append(
-                        ["", "", "", data[0], data[1], data[2], data[3], data[4]])
-            else:
-                mean_diameter_total.append(droplet.mean_diameter)
-                colors.append((0, 1, 0))
-        elif droplet.fault == 3:
-            if len(droplet.stuck) >= N_ADHESIVE_LOW:
-                colors.append((1, 0.65, 0))
-                if not xl: 
-                    stuck_droplet_data.append(
-                        [droplet.img, droplet.mean_diameter_mm, "<5%", droplet.stuck[0][0], droplet.stuck[0][1],
-                        droplet.stuck[0][2], droplet.stuck[0][3], droplet.stuck[0][4]])
-                    xl = True
-                else:
-                    stuck_droplet_data.append(
-                        ["", droplet.mean_diameter_mm, "<5%", droplet.stuck[0][0], droplet.stuck[0][1],
-                         droplet.stuck[0][2], droplet.stuck[0][3], droplet.stuck[0][4]])
-                for data in droplet.stuck[1:]:
-                    stuck_droplet_data.append(
-                        ["", "", "<5%", data[0], data[1], data[2], data[3], data[4]])
-            else:
-                mean_diameter_total.append(droplet.mean_diameter)
-                colors.append((0, 1, 0))
-                
-                
-    # Convert Lists to numpy arrays and create placeholders for class_ids and scores
-    rois = np.array(rois)
-    masks = np.array(masks)
-    class_ids = np.array(range(len(colors)))
-    scores = np.array([1]*len(colors))
-    img_name = "result_{}.jpg".format(os.path.splitext(memory[0][2])[0])
-    if masks.any():
-        masks = np.stack(masks, axis=-1)
-    if config.GENERATE_MASKS:
-        visualize.display_instances(memory[0][1], rois, masks, class_ids, scores, ax=ax, colors=colors, captions=diameter_vis_list,
-                                    title=None, save_dir=SAVE_DIR, img_name=img_name, save_img=True, number_saved_images=SAVE_NTH_IMAGE, counter_1=counter_1)
-    else:
-        visualize.display_instances(memory[0][1], rois, None, class_ids, scores, ax=ax, show_mask=False, colors=colors, captions=diameter_vis_list,
-                                    title=None, save_dir=SAVE_DIR, img_name=img_name, save_img=True, number_saved_images=SAVE_NTH_IMAGE, counter_1=counter_1)
-
-
-def pre_processing(image, crop=None, size_y=1024, size_x=1024, contrast=0):
-    """Crops image and optional contrast adjustments
-    """
-    if crop:
-        size_x = crop[0]
-        size_y = crop[1]
-        image_height, image_width, _ = image.shape
-        # center crop image to given size
-        crop_y = (image_height-size_y)//2
-        crop_x = (image_width-size_x)//2
-        image = image[crop_y:crop_y+size_y, crop_x:crop_x+size_x]
-        #im = Image.fromarray(image.astype(np.uint8))
-        #im.save(os.path.join(SAVE_DIR, 'test.bmp'))
-        # original = image.copy()
-    
-    # use the contrast argument instead of the global CONTRAST, so the
-    # parameter actually takes effect
-    if contrast != 0:
-        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
-    if contrast == 1:
-        # adaptive equalization
-        image = exposure.equalize_adapthist(image)
-        image = image.astype('float32') * 255
-    elif contrast == 2:
-        # contrast stretching
-        p2, p98 = np.percentile(image, (2, 98))
-        image = exposure.rescale_intensity(image, in_range=(p2, p98))
-    
-    return image
-
-# droplet detection & identification of adhesive droplets
-memory = []
-mean_diameter_total = []
-measurements = []
-stuck_droplet_data = []
-
-counter_1 = SAVE_NTH_IMAGE
-for filename in os.listdir(DATASET_DIR):
-    if not filename.endswith('.json'):
-        image = cv2.imread(os.path.join(DATASET_DIR, filename))
-        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
-        # pre process image
-        if IMAGE_CROP or CONTRAST != 0:
-            image = pre_processing(image, crop=IMAGE_CROP, contrast=CONTRAST)
-        image_height, image_width, _ = image.shape
-        image_length_max = max([image_width, image_height])
-        results = model.detect([image], filename=filename, verbose=1)
-        r = results[0]
-        # Creates a List of Droplet Objects with detected information
-        # on initialization, the Droplet object checks whether the droplet is too close to the edge or its aspect ratio is off
-        droplets = []
-        if config.GENERATE_MASKS:
-            for i, roi in enumerate(r['rois']):
-                droplets.append(Droplet(roi, r['masks'][:, :, i], filename))
-        else:
-            for roi in r['rois']:
-                droplets.append(Droplet(roi, None, filename))
-        print(f"Images in memory: {len(memory)}")
-
-        if DETECT_REFLECTIONS == True:
-            for a, b in itertools.combinations(droplets, 2):
-                if(
-                        a.center[0] > b.center[0]*0.95 and a.center[0] < b.center[0]*1.05 and
-                        a.center[1] > b.center[1]*0.95 and a.center[1] < b.center[1]*1.05
-                    ):      
-                    if a.mean_diameter_mm < b.mean_diameter_mm:
-                        a.fault = 4
-                    else:
-                        b.fault = 4       
-        # If memory is not empty iterate through droplets in current picture
-        if memory:
-            for current in droplets:
-                # If droplet has no faults so far compare to all droplets in memory
-                if current.fault == 0:
-                    shortest_distance = 2048
-                    closest_size_diff = 0
-                    # iterate memory from the back (most recent picture first) 
-                    # t is a variable for time between reference image and current memory entry
-                    for t, img in enumerate(reversed(memory), 1):
-                        for droplet in img[0]:
-                            measured_distance = current.distance(droplet.center)
-                            measured_size_diff = current.size_difference(droplet.range)
-                            measurements.append([measured_distance, measured_distance/t, measured_size_diff])
-                            # Checks whether measured distance and size diff are within the defined thresholds
-                            if(measured_distance < MIN_VELOCITY * t and
-                                measured_size_diff < MIN_SIZE_DIFF
-                               ):
-                                shortest_distance = measured_distance / t
-                                closest_size_diff = measured_size_diff
-                                current.stuck.append(((-1*t), droplet.mean_diameter_mm, measured_distance, measured_distance/t, measured_size_diff))
-                                droplet.stuck.append((t, current.mean_diameter_mm, measured_distance, measured_distance/t,  measured_size_diff))
-                                if measured_distance < LOW_DISTANCE_THRESHOLD:
-                                    current.fault = 3
-                                else:
-                                    current.fault = 2
-                                if droplet.fault == 2 or droplet.fault == 0:
-                                    droplet.fault = current.fault
-                                break
-                            # Keeps track of shortest distances to help adjust thresholds
-                            elif measured_distance < shortest_distance:
-                                shortest_distance = measured_distance / t
-                                closest_size_diff = measured_size_diff
-                    if current.fault == 0:
-                        print("Droplet is valid:")
-                        print(f"\tshortest distance to any box:\t{round(shortest_distance,2)}")
-                        print(f"\tsize difference to closest box:\t{round(closest_size_diff*100,2)}%")
-                    else:
-                        print("Droplet is stuck to Lens:")
-                        print(f"\tdistance too close to box:\t{round(shortest_distance,2)}")
-                        print(f"\tsize difference to that box:\t{round(closest_size_diff*100,2)}%")
-                else:
-                    print(
-                        "Droplet is either to close to the edge or aspect ratio is off")
-        # Depending on the number of images compared, visualizes the one furthest back and removes it from the list
-        if(len(memory) == N_IMAGES_COMPARED):
-            visualize_result(memory,counter_1)
-            memory.pop(0)
-        # Append List of current droplets and image information to the memory
-        memory.append((droplets, image, filename))
-        print("\n")
-        if counter_1 == SAVE_NTH_IMAGE:
-            counter_1 = 0
-        counter_1 = counter_1 + 1
-
-# Visualizes the remaining pictures
-while memory:
-    visualize_result(memory,counter_1)
-    memory.pop(0)
-    if counter_1 == SAVE_NTH_IMAGE:
-        counter_1 = 0
-    counter_1 = counter_1 + 1
-### Translate Mean Diameter Into Actual Droplet Sizes (mm)
-
-# recalculate mean diameter
-mean_diameter_total_resize = [(i * PIXELSIZE / 1000)
-                              for i in mean_diameter_total]
-
-### Convert Mean Diameter To Excel
-pd.DataFrame(mean_diameter_total_resize).to_excel(
-    EXCEL_DIR, header=False, index=False)
-### Save measured data for threshold tuning
-if SAVE_COORDINATES is True:
-    pd.DataFrame(measurements).to_excel(
-        os.path.join(SAVE_DIR, "xyz_measurements.xlsx"), header=False, index=False)
-    pd.DataFrame(stuck_droplet_data).to_excel(
-        os.path.join(SAVE_DIR, "stuck_droplet_data.xlsx"), header=False, index=False)
-
-print("--- %s seconds ---" % (time.time() - start_time))
-
diff --git a/classes/droplet/train_droplet.py b/classes/droplet/train_droplet.py
deleted file mode 100644
index 05de6d278c5be507c4e8d41d1a991f3ab30d284f..0000000000000000000000000000000000000000
--- a/classes/droplet/train_droplet.py
+++ /dev/null
@@ -1,901 +0,0 @@
-"""
-MRCNN Particle Detection
-Train the droplet class of the MRCNN model.
-
-The source code of "MRCNN Particle Detection" (https://git.rwth-aachen.de/avt-fvt/private/mrcnn-particle-detection) 
-is based on the source code of "Mask R-CNN" (https://github.com/matterport/Mask_RCNN).
-
-The source code of "Mask R-CNN" is licensed under the MIT License (MIT).
-Copyright (c) 2017 Matterport, Inc.
-Written by Waleed Abdulla
-
-All source code modifications to the source code of "Mask R-CNN" in "MRCNN Particle Detection" 
-are licensed under the Eclipse Public License v2.0 (EPL 2.0).
-Copyright (c) 2022-2023 Fluid Process Engineering (AVT.FVT), RWTH Aachen University
-Edited by Stepan Sibirtsev, Mathias Neufang & Jakob Seiler
-
-The copyrights and license terms are given in LICENSE.
-
-Ideas and small code snippets were adapted from this source:
-https://github.com/mat02/Mask_RCNN
-"""  
-
-### ----------------------------------- ###
-### Necessary Parameters and Data Names ###
-### ----------------------------------- ###
-
-# is the script executed on the cluster, e.g., RWTH High Performance Computing cluster? True = yes, False = no
-cluster = False
-
-### please specify only for non-cluster executions 
-
-# file format of images
-file_format = "jpg"
-# input dataset path to find in "...\datasets\input\..."
-dataset_path = r"test"                  
-# path to save the new weights "...\models\..."
-new_weights_path = r"test"
-# name of the excel results file to find in "...\models\<WeightsFolderName>\"
-name_result_file = "test"
-# generate detection masks? True = yes, False = no
-masks = False
-# is the program execution done on GPU or CPU? True = GPU, False = CPU
-device = True
-# epochs to train
-epochs = 50
-# should early stopping be used? 0 = no; otherwise the value is the patience, i.e., the number of epochs without improvement
-early_stopping = 0
-# loss monitored by early stopping
-early_loss = "val_loss"
-# define base weights
-base_weights = "coco"
-# percentage of the training dataset to be used [%], e.g., to determine the number of images required in the training/validation set for accurate detection performance
-dataset_quantity = 100
-
-### specifications for Weights & Biases
-
-# use Weights & Biases to collect training data
-use_wandb = False
-# enter entity name
-wandb_entity = "test"
-# enter project name
-wandb_project = "test"
-# enter group name
-wandb_group = "test"
-# enter run name
-wandb_name = "test"
-
-### specifications for k-fold cross-validation
-
-# perform a k-fold cross validation? True = yes, False = no
-cross_validation = True
-# number of folds for k-fold cross-validation
-k_fold = 5
-# fold number to use for validation, starting at 0
-k_fold_val = 0
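-# a minimal sketch (assumption, not necessarily the exact code further down) of
-# how the k-fold split can be realized: cut the shuffled image list into k_fold
-# slices and use slice k_fold_val for validation:
-#
-#   images = sorted(glob.glob(os.path.join(DATASET_DIR, "*." + file_format)))
-#   random.Random(42).shuffle(images)
-#   folds = [images[i::k_fold] for i in range(k_fold)]
-#   val_images = folds[k_fold_val]
-#   train_images = [f for i, fold in enumerate(folds) if i != k_fold_val for f in fold]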
-
-### specifications for training parameters
-
-# backbone (BACKBONE --> config.py). 0 = "resnet50", 1 = "resnet101"
-backbone_type = 0
-# train all layers = True, train only heads = False
-train_all_layers = False
-# number of images to train with on each GPU. 
-# a 12GB GPU can typically handle 2 images of 1024x1024px.
-# adjust based on your GPU memory and image sizes. 
-# if only one GPU is used, this parameter is equivalent to batch size (BATCH_SIZE --> config.py).
-images_gpu = 1
-# learning rate (LEARNING_RATE --> config.py). 0 = 0.01, 1 = 0.001, 2 = 0.0001
-learning = 1
-# image resolution (IMAGE_MAX_DIM --> config.py). 0 = 512, 1 = 1024, 2 = 2048
-# select the closest value corresponding to the largest side of the image.
-image_max = 1
-# learning momentum (LEARNING_MOMENTUM --> config.py). 0 = 0.8, 1 = 0.9, 2 = 0.99
-momentum = 1
-# weight decay (WEIGHT_DECAY --> config.py). 0 = 0.0001, 1 = 0.001, 2 = 0.01
-w_decay = 0
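-# the encoded options above map to config values; a minimal sketch (assumption)
-# of the intended mapping, based on the comments:
-#
-#   BACKBONE = ["resnet50", "resnet101"][backbone_type]
-#   LEARNING_RATE = [0.01, 0.001, 0.0001][learning]
-#   IMAGE_MAX_DIM = [512, 1024, 2048][image_max]
-#   LEARNING_MOMENTUM = [0.8, 0.9, 0.99][momentum]
-#   WEIGHT_DECAY = [0.0001, 0.001, 0.01][w_decay]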
-
-### specifications for augmentation methods
-
-# use augmentation methods? True = yes, False = no
-augmentation = True
-# use flip? 0 = no, 1 = (0.5, 0.5)
-flip = 0
-# use crop? 0 = no, 1 = (-0.25, 0), 2 = (-0.1, 0)
-cropandpad = 0
-# use rotate? 0 = no, 1 = (-45, 45), 2 = (-90, 90)
-rotate = 0
-# use additive gaussian noise? 0 = no, 1 = 0.01, 2 = 0.02
-noise = 0 
-# use gamma contrast? 0 = no, 1 = yes
-gamma = 0 
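-# a minimal sketch (assumption) of how these switches could translate into an
-# imgaug pipeline, the augmentation format the matterport training API accepts:
-#
-#   import imgaug.augmenters as iaa
-#   augmenters = []
-#   if flip == 1:
-#       augmenters.extend([iaa.Fliplr(0.5), iaa.Flipud(0.5)])
-#   if rotate == 1:
-#       augmenters.append(iaa.Affine(rotate=(-45, 45)))
-#   elif rotate == 2:
-#       augmenters.append(iaa.Affine(rotate=(-90, 90)))
-#   augmentation_pipeline = iaa.Sequential(augmenters)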
-
-### specifications for contrast adjustment
-
-# use contrast adjustment? 0 = no, 1 = contrast limited adaptive histogram equalization, 2 = contrast stretching
-contrast = 0
-
-### ----------------------------------- ###
-###             Initialization          ###
-### ----------------------------------- ###
-
-import warnings
-warnings.simplefilter(action='ignore', category=FutureWarning)
-
-import os
-import sys
-import json
-import datetime
-import time
-import numpy as np
-import skimage.draw
-import tensorflow as tf
-import random
-import pandas as pd
-from numpy import array
-from numpy import asarray
-from pathlib import Path
-from skimage import exposure
-import cv2
-import glob
-
-# Root directory of the project
-if cluster == False:
-    ROOT_DIR = os.path.abspath("")
-    WEIGHTS_DIR = os.path.join(ROOT_DIR, "models", new_weights_path)
-    EXCEL_DIR = os.path.join(WEIGHTS_DIR, name_result_file + '.xlsx')
-    FILE_FORMAT = file_format
-    BASE_WEIGHTS = base_weights
-    IMAGE_MAX = image_max
-    EARLY = early_stopping
-    EARLY_LOSS = early_loss
-    EPOCH_NUMBER = epochs
-    DATASET_QUANTITY = dataset_quantity
-    K_FOLD = k_fold
-    K_FOLD_VAL = k_fold_val
-    DEVICE = device
-    IMAGES_GPU = images_gpu
-    MASKS = masks
-    AUGMENTATION = augmentation
-    CONTRAST = contrast
-    USE_WANDB = use_wandb
-    BACKBONE_TYPE = backbone_type
-    CROSS_VALIDATION = cross_validation
-    WANDB_ENTITY = wandb_entity
-    WANDB_PROJECT = wandb_project
-    WANDB_GROUP = wandb_group
-    WANDB_NAME = wandb_name
-    LEARNING = learning
-    MOMENTUM = momentum
-    W_DECAY = w_decay
-    TRAIN_ALL_LAYERS = train_all_layers
-    AUG_PARAMETERS = (cropandpad, rotate, noise, gamma, flip)
-    if CONTRAST == 0:
-        DATASET_DIR = os.path.join(ROOT_DIR, "datasets/input", dataset_path, "original")
-    elif CONTRAST == 1:
-        DATASET_DIR = os.path.join(ROOT_DIR, "datasets/input", dataset_path, "clahe")
-    elif CONTRAST == 2:
-        DATASET_DIR = os.path.join(ROOT_DIR, "datasets/input", dataset_path, "stretching")
-
-else:
-    import argparse
-    # Parse command line arguments
-    parser = argparse.ArgumentParser(
-        description='Train Mask R-CNN to detect droplets.')
-    parser.add_argument('--dataset_path', required=True, 
-                        help='Directory of the Droplet dataset')
-    parser.add_argument('--name_result_file', required=False, default='Results',
-                        help='Name of the excel result file to find in "Mask_R_CNN\models\<WeightsFolderName>\"')
-    parser.add_argument('--new_weights_path', required=False, default='Weights',
-                        help='Directory to save the new weights, created under "models"')
-    parser.add_argument('--base_weights', required=False, default='coco',
-                        metavar="/path/to/weights.h5",
-                        help="Path to weights .h5 file or 'coco'")
-    parser.add_argument('--file_format', required=True,
-                        help='')
-    parser.add_argument('--masks', required=False, type=str,
-                        default="False",
-                        help='Generate detection masks? True = yes, False = no')
-    parser.add_argument('--device', required=False, type=str,
-                        default="True",
-                        help='is the evaluation done on GPU? True = yes, False = no')
-    parser.add_argument('--augmentation', required=False, type=str,
-                        default="False",
-                        help='image augmentation of dataset')
-    parser.add_argument('--use_wandb', required=False, type=str,
-                        default="False",
-                        help='use wandb for data collection')
-    parser.add_argument('--cross_validation', required=False, type=str,
-                        default="True",
-                        help='trains model on all data, disables train/validation split')
-    parser.add_argument('--train_all_layers', required=False, type=str,
-                        default="False",
-                        help='')    
-    #
-    parser.add_argument('--early_loss', required=False,
-                        default="val_loss",
-                        help='monitored early stopping quantity')
-    parser.add_argument('--image', required=False,
-                        metavar="path or URL to image",
-                        help='Image to apply the color splash effect on')
-    parser.add_argument('--video', required=False,
-                        metavar="path or URL to video",
-                        help='Video to apply the color splash effect on')
-    parser.add_argument('--wandb_entity', required=False,
-                        default="test",
-                        help='')    
-    parser.add_argument('--wandb_project', required=False,
-                        default="test",
-                        help='')
-    parser.add_argument('--wandb_group', required=False,
-                        default="test",
-                        help='')
-    parser.add_argument('--wandb_name', required=False,
-                        default="test",
-                        help='')
-    #
-    parser.add_argument('--image_max', required=False, type=int,
-                        default=1,
-                        help="max. image size")                      
-    parser.add_argument('--images_gpu', required=False, type=int,
-                        default=2,
-                        help='Number of images to train with on each GPU')
-    parser.add_argument('--early_stopping', required=False, type=int,
-                        default=0,
-                        help='enables early stopping')
-    parser.add_argument('--epochs', required=False, type=int,
-                        default=30,
-                        help='set number of training epochs, default = 30')
-    parser.add_argument('--dataset_quantity', required=False, type=int,
-                        default=100,
-                        help='ratio of train/validation dataset in [%], default = 100')
-    parser.add_argument('--k_fold', required=False, type=int,
-                        default=5,
-                        help='number of folds for k-fold cross-validation')
-    parser.add_argument('--k_fold_val', required=False, type=int,
-                        default=0,
-                        help='fold of k fold validation set')            
-    parser.add_argument('--contrast', required=False, type=int,
-                        default=0,
-                        help='contrast adjustment? 0 = no, 1 = contrast limited adaptive histogram equalization (CLAHE), 2 = contrast stretching')
-    parser.add_argument('--backbone_type', required=False, type=int,
-                        default=0,
-                        help='backbone architecture: 0 = "resnet50", 1 = "resnet101"')
-    parser.add_argument('--learning', required=False, type=int,
-                        default=2,
-                        help='learning rate: 0 = 0.01, 1 = 0.001, 2 = 0.0001')
-    parser.add_argument('--momentum', required=False, type=int,
-                        default=1,
-                        help='learning momentum: 0 = 0.8, 1 = 0.9, 2 = 0.99')
-    parser.add_argument('--w_decay', required=False, type=int,
-                        default=0,
-                        help='weight decay: 0 = 0.0001, 1 = 0.001, 2 = 0.01')
-
-    # Augmentations: 0 disables; the integer values index the translation
-    # dicts in get_augmentation() below, so they must be parsed as ints
-    parser.add_argument('--flip', required=False, type=int, default=0)
-    parser.add_argument('--cropandpad', required=False, type=int, default=0)
-    parser.add_argument('--rotate', required=False, type=int, default=0)
-    parser.add_argument('--noise', required=False, type=int, default=0)
-    parser.add_argument('--gamma', required=False, type=int, default=0)
-    
-    args = parser.parse_args()
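-
-    # Illustrative invocation (hypothetical script name; further arguments such
-    # as --dataset_path are defined earlier in this file):
-    #   python droplet_training.py --file_format png --device True \
-    #       --epochs 30 --k_fold 5 --k_fold_val 0 --contrast 0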
-
-    import time  # harmless if already imported at the top of the file
-
-    timestr = time.strftime("%H")
-    ROOT_DIR = os.path.join("/rwthfs/rz/cluster", os.path.abspath("../.."))
-    save_dir = "/rwthfs/rz/cluster/hpcwork/ss002458/"
-    WEIGHTS_DIR = os.path.join(save_dir, "models", args.new_weights_path + timestr + str(random.randint(1000,9999)))
-    EXCEL_DIR = os.path.join(WEIGHTS_DIR, args.name_result_file + '.xlsx')    
-    FILE_FORMAT = args.file_format
-    BASE_WEIGHTS = args.base_weights
-    #
-    if args.masks == "True":
-        MASKS = True
-    elif args.masks == "False":
-        MASKS = False
-    if args.device == "True":
-        DEVICE = True
-    elif args.device == "False":
-        DEVICE = False
-    if args.augmentation == "True":
-        AUGMENTATION = True
-    elif args.augmentation == "False":
-        AUGMENTATION = False
-    if args.use_wandb == "True":
-        USE_WANDB = True
-    elif args.use_wandb == "False":
-        USE_WANDB = False
-    if args.cross_validation == "True":
-        CROSS_VALIDATION = True
-    elif args.cross_validation == "False":
-        CROSS_VALIDATION = False
-    if args.train_all_layers == "True":
-        TRAIN_ALL_LAYERS = True
-    elif args.train_all_layers == "False":
-        TRAIN_ALL_LAYERS = False       
-    #
-    IMAGE_MAX = args.image_max
-    EARLY = args.early_stopping
-    EARLY_LOSS = args.early_loss
-    EPOCH_NUMBER = args.epochs
-    DATASET_QUANTITY = args.dataset_quantity
-    K_FOLD = args.k_fold
-    K_FOLD_VAL = args.k_fold_val
-    DEVICE = args.device
-    IMAGES_GPU = args.images_gpu
-    MASKS = args.masks
-    AUGMENTATION = args.augmentation
-    CONTRAST = args.contrast
-    AUG_PARAMETERS = (args.cropandpad, args.rotate, args.noise, args.gamma, args.flip)
-    CROSS_VALIDATION = args.cross_validation
-    WANDB_ENTITY = args.wandb_entity
-    WANDB_PROJECT = args.wandb_project
-    WANDB_GROUP = args.wandb_group
-    WANDB_NAME = args.wandb_name
-    if CONTRAST == 0:
-        DATASET_DIR = os.path.join(ROOT_DIR, "datasets/input", args.dataset_path, "original")
-    elif CONTRAST == 1:
-        DATASET_DIR = os.path.join(ROOT_DIR, "datasets/input", args.dataset_path, "clahe")
-    elif CONTRAST == 2:
-        DATASET_DIR = os.path.join(ROOT_DIR, "datasets/input", args.dataset_path, "stretching")
-    USE_WANDB = args.use_wandb
-    BACKBONE_TYPE = args.backbone_type
-    LEARNING = args.learning
-    MOMENTUM = args.momentum
-    W_DECAY = args.w_decay
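-
-    # Sketch (not from the original source): the repeated string-to-boolean
-    # blocks above could be collapsed by a small converter registered with
-    # argparse, e.g.
-    #
-    #   def str2bool(v):
-    #       return str(v).lower() in ("true", "1", "yes")
-    #
-    #   parser.add_argument('--masks', type=str2bool, default=False,
-    #                       help='Generate detection masks? True = yes, False = no')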
-
-
-### Initialization: total number of images (training + validation)
-# imports used by the code below (harmless if already present at the top of the file)
-import glob
-import datetime
-import skimage.io
-import skimage.color
-import skimage.draw
-import tensorflow as tf
-from skimage import exposure
-
-dataset_size = 0
-images_mean_pixel = []
-for f in sorted(os.listdir(DATASET_DIR)):
-    sub_folder = os.path.join(DATASET_DIR, f)
-    images_path = glob.glob(sub_folder + "/*." + FILE_FORMAT)
-    dataset_size = dataset_size + len(images_path)
-    for img_path in images_path:
-        img = cv2.imread(img_path)
-        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
-        if CONTRAST == 1:
-            # the image is already RGB here, hence RGB2GRAY (not BGR2GRAY)
-            img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
-            img = exposure.equalize_adapthist(img)
-            img = img.astype('float32') * 255
-            # restore three channels so the per-channel means below stay consistent
-            img = np.stack([img] * 3, axis=-1)
-        images_mean_pixel.append(img)
-color_sum = np.zeros(3, dtype='float64')
-for img in images_mean_pixel:
-    pixels = np.asarray(img).astype('float32')
-    # per-channel means of this image
-    means = pixels.mean(axis=(0, 1), dtype='float64')
-    color_sum += means
-# average the per-image channel means over the whole dataset
-mean_pixel = color_sum / len(images_mean_pixel)
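-# Sanity check (illustrative): mean_pixel should now be a length-3 array of
-# per-channel means in [0, 255], comparable to the hard-coded constants in the
-# commented-out MEAN_PIXEL block of DropletConfig below.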
-
-COMMAND_MODE = "train"
-# Import Mask RCNN
-sys.path.append(ROOT_DIR)  # To find local version of the library
-Path(WEIGHTS_DIR).mkdir(parents=True, exist_ok=True)
-from mrcnn.config import Config
-from mrcnn import model as modellib, utils
-
-# Path to trained weights file
-COCO_WEIGHTS_PATH = os.path.join(ROOT_DIR, "models/coco/mask_rcnn_coco.h5")
-
-# Directory to save logs and model checkpoints, if not provided
-# through the command line argument --logs
-# DEFAULT_LOGS_DIR = os.path.join(r'D:\logs', "models")
-
-############################################################
-#  Configurations
-############################################################
-
-
-class DropletConfig(Config):
-    """Configuration for training on the toy  dataset.
-    Derives from the base Config class and overrides some values.
-    """
-    # Give the configuration a recognizable name
-    NAME = "droplet"
-
-    # NUMBER OF GPUs to use. When using only a CPU, this needs to be set to 1.
-    GPU_COUNT = 1
-
-    # Generate detection masks
-    #     False: Output only bounding boxes like in Faster-RCNN
-    #     True: Generate masks as in Mask-RCNN
-    GENERATE_MASKS = MASKS
-
-    # We use a GPU with 12GB memory, which can fit two images.
-    # Adjust down if you use a smaller GPU.
-    if DEVICE == True:
-        IMAGES_PER_GPU = IMAGES_GPU
-    else:
-        IMAGES_PER_GPU = 1
-
-    #
-    if BACKBONE_TYPE == 0:
-        BACKBONE = "resnet50"
-    else:   
-        BACKBONE = "resnet101"
-    #
-    if LEARNING == 0:
-        LEARNING_RATE = 0.01
-    elif LEARNING == 1:
-        LEARNING_RATE = 0.001
-    elif LEARNING == 2:
-        LEARNING_RATE = 0.0001        
-    #
-    if MOMENTUM == 0:
-        LEARNING_MOMENTUM = 0.8
-    elif MOMENTUM == 1:
-        LEARNING_MOMENTUM = 0.9
-    elif MOMENTUM == 2:
-        LEARNING_MOMENTUM = 0.99
-    #
-    if W_DECAY == 0:
-        WEIGHT_DECAY = 0.0001
-    elif W_DECAY == 1:
-        WEIGHT_DECAY = 0.001
-    elif W_DECAY == 2:
-        WEIGHT_DECAY = 0.01       
-
-    # Number of classes (including background)
-    NUM_CLASSES = 1 + 1  # Background + droplet
-
-    # Number of training steps per epoch
-    if CROSS_VALIDATION == True:
-        # DATASET_QUANTITY is a percentage, so divide by 100 as a float
-        # (integer division would yield 0 for values below 100)
-        TRAINING_STEPS = round((DATASET_QUANTITY/100)*dataset_size*(K_FOLD-1)/K_FOLD)//(IMAGES_GPU*GPU_COUNT)
-        VALIDATION_STEPS = round((DATASET_QUANTITY/100)*dataset_size/K_FOLD)//(IMAGES_GPU*GPU_COUNT)
-    else:
-        TRAINING_STEPS = round((DATASET_QUANTITY/100)*dataset_size)//(IMAGES_GPU*GPU_COUNT)
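-    # Worked example (illustrative): with dataset_size = 100, DATASET_QUANTITY = 100,
-    # K_FOLD = 5, IMAGES_GPU = 2 and GPU_COUNT = 1:
-    #   TRAINING_STEPS   = round(1.0 * 100 * 4/5) // 2 = 40
-    #   VALIDATION_STEPS = round(1.0 * 100 / 5) // 2   = 10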
-
-    # Input image resizing
-    if IMAGE_MAX == 0:
-        IMAGE_MAX_DIM = 512
-    elif IMAGE_MAX == 1:
-        IMAGE_MAX_DIM = 1024
-    elif IMAGE_MAX == 2:
-        IMAGE_MAX_DIM = 2048
-
-    IMAGE_MIN_DIM = IMAGE_MAX_DIM
-
-    MEAN_PIXEL = mean_pixel
-
-    # dataset-dependent alternative: hard-coded mean pixel values per contrast mode
-    #CONTRAST = CONTRAST
-    #if CONTRAST == 0:
-    #    MEAN_PIXEL = np.array([120.4, 120.4, 120.4])
-    #elif CONTRAST == 1:
-    #    MEAN_PIXEL = np.array([112.3, 112.3, 112.3])
-    #elif CONTRAST == 2:
-    #    MEAN_PIXEL = np.array([148.9, 148.9, 148.9])
-
-
-############################################################
-#  Dataset
-############################################################
-
-class DropletDataset(utils.Dataset):
-
-    def load_droplet(self, dataset_dir, subset, model):
-        """Load a subset of the droplet dataset.
-        dataset_dir: Root directory of the dataset.
-        subset: Subset to load: train or val
-        model: passed through from train(); not used while loading annotations
-        """
-        # Add classes. We have only one class to add.
-        self.add_class("droplet", 1, "droplet")
-
-        # define path of training/validation dataset
-        # dataset_dir = os.path.join(dataset_dir, "all")
-
-        # Load annotations
-        # VGG Image Annotator (up to version 1.6) saves each image in the form:
-        # { 'filename': '28503151_5b5b7ec140_b.jpg',
-        #   'regions': {
-        #       '0': {
-        #           'region_attributes': {},
-        #           'shape_attributes': {
-        #               'all_points_x': [...],
-        #               'all_points_y': [...],
-        #               'name': 'polygon'}},
-        #       ... more regions ...
-        #   },
-        #   'size': 100202
-        # }
-        # We mostly care about the x and y coordinates of each region
-        # Note: In VIA 2.0, regions was changed from a dict to a list.
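-        # In VIA 2.0 the same entry looks like this (illustrative sketch, using
-        # the ellipse shape handled by load_mask below):
-        # { 'filename': '28503151_5b5b7ec140_b.jpg',
-        #   'regions': [
-        #       { 'region_attributes': {},
-        #         'shape_attributes': {
-        #             'name': 'ellipse',
-        #             'cx': 100, 'cy': 120, 'rx': 30, 'ry': 20 } }
-        #   ],
-        #   'size': 100202 }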
-        annotations_all = []
-        annotations = []
-        for dataset_folder in sorted(os.listdir(dataset_dir)):
-            annotations_quality = json.load(open(os.path.join(dataset_dir, dataset_folder, "train.json")))
-            annotations_quality = list(annotations_quality.values()) # don't need the dict keys
-            # The VIA tool saves images in the JSON even if they don't have any
-            # annotations. Skip unannotated images.
-            annotations_quality = [a for a in annotations_quality if a['regions']]
-            if CROSS_VALIDATION == True:
-                ### random choice of the training/validation dataset from the existing dataset
-                # resetting the random seed to ensure comparability between runs
-                np.random.seed(23)
-                # define quantity of train/validation dataset
-                train_val_set = int(round(DATASET_QUANTITY*len(annotations_quality)/100))
-                # random choice of the training/validation dataset
-                annotations_quality = np.random.choice(annotations_quality, train_val_set, replace=False)
-                # split the training/validation dataset into folds
-                annotations_quality = np.array_split(annotations_quality, K_FOLD)
-                # transpose the list for further processing
-                annotations_quality = np.transpose(annotations_quality)
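-                # Illustrative: with 10 annotations and K_FOLD = 5, array_split
-                # yields 5 folds of 2 entries; after the transpose, picking
-                # column K_FOLD_VAL from every row collects exactly one fold,
-                # which becomes the validation set below.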
-                # merging the datasets of different qualities into one dataset
-                annotations_all.extend(annotations_quality)
-                # save the k-fold split training/validation dataset
-                pd.DataFrame(annotations_all).to_excel(EXCEL_DIR, header=True, index=False)
-                # go through the columns of the k-fold split training/validation dataset
-                for column in range(K_FOLD): 
-                    annotations = [row[column] for row in annotations_quality]   
-                    # check if partial dataset is a train or validation dataset
-                    if subset == "train" and column != K_FOLD_VAL:
-                        annotations_use = annotations
-                    elif subset == "val" and column == K_FOLD_VAL:
-                        annotations_use = annotations
-                    else:
-                        continue
-                    # Add images
-                    for a in annotations_use:
-                        # Get the x, y coordinates of points of the polygons that make up
-                        # the outline of each object instance. These are stored in the
-                        # shape_attributes (see the JSON format above).
-                        # The if condition is needed to support VIA versions 1.x and 2.x.
-                        if type(a['regions']) is dict:
-                            polygons = [r['shape_attributes'] for r in a['regions'].values()]
-                        else:
-                            polygons = [r['shape_attributes'] for r in a['regions']] 
-
-                        # load_mask() needs the image size to convert polygons to masks.
-                        # Unfortunately, VIA doesn't include it in JSON, so we must read
-                        # the image. This is only manageable since the dataset is tiny.
-                        image_path = os.path.join(dataset_dir, dataset_folder, a['filename'])
-                        image = skimage.io.imread(image_path)  
-                        height, width = image.shape[:2]
-
-                        self.add_image(
-                            "droplet",
-                            image_id=a['filename'],  # use file name as a unique image id
-                            path=image_path,
-                            width=width, height=height,
-                            polygons=polygons)
-            else:
-                ### random choice of the training/validation dataset from the existing dataset
-                # resetting the random seed to ensure comparability between runs
-                np.random.seed(23)
-                # define quantity of train/validation dataset
-                train_val_set = int(round(DATASET_QUANTITY*len(annotations_quality)/100))
-                # random choice of the training/validation dataset
-                annotations_quality = np.random.choice(annotations_quality, train_val_set, replace=False)
-                if subset == "train":
-                    for a in annotations_quality:
-                        image_path = os.path.join(dataset_dir, dataset_folder, a['filename'])
-                        image = skimage.io.imread(image_path)
-                        height, width = image.shape[:2]
-                        
-                        if type(a['regions']) is dict:
-                            polygons = [r['shape_attributes'] for r in a['regions'].values()]
-                        else:
-                            polygons = [r['shape_attributes'] for r in a['regions']] 
-                        
-                        self.add_image(
-                            "droplet",
-                            # use file name as a unique image id
-                            image_id=a['filename'],
-                            path=image_path,
-                            width=width, height=height,
-                            polygons=polygons)
-
-    def load_mask(self, image_id):
-        """Generate instance masks for an image.
-       Returns:
-        masks: A bool array of shape [height, width, instance count] with
-            one mask per instance.
-        class_ids: a 1D array of class IDs of the instance masks.
-        """
-        # If not a droplet dataset image, delegate to parent class.
-        image_info = self.image_info[image_id]
-        if image_info["source"] != "droplet":
-            return super(self.__class__, self).load_mask(image_id)
-
-        # Convert polygons to a bitmap mask of shape
-        # [height, width, instance_count]
-        info = self.image_info[image_id]
-        mask = np.zeros([info["height"], info["width"], len(info["polygons"])],
-                        dtype=np.uint8)
-       
-        for i, p in enumerate(info["polygons"]):
-            # Get indexes of pixels inside the polygon and set them to 1
-            if p['name'] == 'polygon':
-                rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])
-            elif p['name'] == 'ellipse':
-                rr, cc = skimage.draw.ellipse(p['cy'], p['cx'], p['ry'], p['rx'])
-            else:
-                # note: skimage.draw.circle was removed in scikit-image >= 0.19;
-                # newer versions provide skimage.draw.disk((p['cy'], p['cx']), p['r'])
-                rr, cc = skimage.draw.circle(p['cy'], p['cx'], p['r'])
-            
-            # discard pixel indices that fall outside the image bounds
-            # (ellipses/circles near the border can produce out-of-range values)
-            inside = (rr >= 0) & (rr < info["height"]) & (cc >= 0) & (cc < info["width"])
-            rr = rr[inside]
-            cc = cc[inside]
-
-            if len(rr) == 0 or len(cc) == 0:
-                continue
-            mask[rr, cc, i] = 1
-
-        # Return mask, and array of class IDs of each instance. Since we have
-        # one class ID only, we return an array of 1s
-        return mask.astype(bool), np.ones([mask.shape[-1]], dtype=np.int32)  # bool instead of the removed np.bool alias
-
-    def image_reference(self, image_id):
-        """Return the path of the image."""
-        info = self.image_info[image_id]
-        if info["source"] == "droplet":
-            return info["path"]
-        else:
-            return super(self.__class__, self).image_reference(image_id)
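-
-# Usage sketch (illustrative, not part of the training flow below): a quick
-# standalone check of the dataset class could look like
-#   ds = DropletDataset()
-#   ds.load_droplet(DATASET_DIR, "train", model=None)
-#   ds.prepare()
-#   masks, class_ids = ds.load_mask(ds.image_ids[0])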
-
-def get_augmentation():
-    import imgaug.augmenters as iaa
-    
-    # lookup tables translating the integer CLI inputs into augmentation
-    # parameters; passing 0-2 in array jobs is simpler than specific values
-    
-    cropDict = {
-        1: (-0.25, 0),
-        2: (-0.1, 0)
-    }
-    rotDict = {
-        1: (-45, 45),
-        2: (-90, 90)
-    }
-    noiseDict = {
-        1: 0.01,
-        2: 0.02
-    }
-    flipDict = {
-        1: (0.5, 0.5)
-    }
-    
-    if AUG_PARAMETERS[4] != 0:
-        flip_lr_up = flipDict[AUG_PARAMETERS[4]]
-        aug = iaa.Sequential([
-                iaa.Fliplr(flip_lr_up[0]),
-                iaa.Flipud(flip_lr_up[1])
-        # iaa.Sometimes(0.5, iaa.Rot90(1))
-        ])
-    else:
-        aug = iaa.Sequential([
-        ])
-    # add other augments that were enabled
-    if AUG_PARAMETERS[1] != 0:
-        rot = rotDict[AUG_PARAMETERS[1]]
-        randrot = iaa.Affine(rotate=(rot[0], rot[1]))
-        aug = iaa.Sequential([aug, randrot])
-
-    if AUG_PARAMETERS[0] != 0:
-        crop = cropDict[AUG_PARAMETERS[0]]
-        randcrop = iaa.CropAndPad(percent=(crop[0], crop[1]), sample_independently=False)
-        aug = iaa.Sequential([aug, randcrop])
-
-    if AUG_PARAMETERS[3] != 0:
-        # wrap GammaContrast in iaa.Sometimes so it is applied with probability
-        # 0.25 (value 1) or 0.5 (value 2); the original line built a tuple
-        # instead of an augmenter, which iaa.Sequential cannot handle
-        gamma = iaa.Sometimes(0.25 * AUG_PARAMETERS[3], iaa.GammaContrast(gamma=(0.5, 2)))
-        aug = iaa.Sequential([aug, gamma])
-
-    if AUG_PARAMETERS[2] != 0:
-        noise = noiseDict[AUG_PARAMETERS[2]]
-        gaussnoise = iaa.AdditiveGaussianNoise(scale=noise*255)
-        aug = iaa.Sequential([aug, gaussnoise])
-    return aug
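-
-# Illustrative: --flip=1 --rotate=2 --noise=1 (with --cropandpad=0 --gamma=0)
-# builds Sequential([Fliplr(0.5), Flipud(0.5)]), then appends
-# Affine(rotate=(-90, 90)) and AdditiveGaussianNoise(scale=0.01*255).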
-
-def train(model, custom_callbacks=None):
-    """Train the model."""
-
-    # Training dataset.
-    dataset_train = DropletDataset()
-    dataset_train.load_droplet(DATASET_DIR, "train", model)
-    dataset_train.prepare()
-
-    # Validation dataset
-    if CROSS_VALIDATION == True:  
-        dataset_val = DropletDataset()
-        dataset_val.load_droplet(DATASET_DIR, "val", model)
-        dataset_val.prepare()
-    else:
-        dataset_val = None
-    print(f"Train Dataset: {len(dataset_train.image_ids)} Pictures")
-
-    # define augmentation
-    if AUGMENTATION == True:
-        augmentation_type = get_augmentation()
-    else: 
-        augmentation_type = None
-
-    # *** This training schedule is an example. Update to your needs ***
-    # Since we're using a very small dataset, and starting from
-    # COCO trained weights, we don't need to train too long. Also,
-    # no need to train all layers, just the heads should do it.
-    print("Training network layers")
-    if TRAIN_ALL_LAYERS == True:
-        layers = 'all'
-    else:
-        layers = 'heads'
-    model.train(dataset_train, dataset_val,
-                learning_rate=config.LEARNING_RATE,
-                epochs=EPOCH_NUMBER, augmentation=augmentation_type,
-                layers=layers, custom_callbacks=custom_callbacks)
-
-def color_splash(image, mask):
-    """Apply color splash effect.
-    image: RGB image [height, width, 3]
-    mask: instance segmentation mask [height, width, instance count]
-
-    Returns result image.
-    """
-    # Make a grayscale copy of the image. The grayscale copy still
-    # has 3 RGB channels, though.
-    gray = skimage.color.gray2rgb(skimage.color.rgb2gray(image)) * 255
-    # Copy color pixels from the original color image where mask is set
-    if mask.shape[-1] > 0:
-        # We're treating all instances as one, so collapse the mask into one layer
-        mask = (np.sum(mask, -1, keepdims=True) >= 1)
-        splash = np.where(mask, image, gray).astype(np.uint8)
-    else:
-        splash = gray.astype(np.uint8)
-    return splash
-
-
-def detect_and_color_splash(model, image_path=None, video_path=None):
-    assert image_path or video_path
-
-    # Image or video?
-    if image_path:
-        # Run model detection and generate the color splash effect
-        print("Running on {}".format(args.image))
-        # Read image
-        image = skimage.io.imread(args.image)
-        # Detect objects
-        r = model.detect([image], verbose=1)[0]
-        # Color splash
-        splash = color_splash(image, r['masks'])
-        # Save output
-        file_name = "splash_{:%Y%m%dT%H%M%S}.png".format(datetime.datetime.now())
-        skimage.io.imsave(file_name, splash)
-    elif video_path:
-        import cv2
-        # Video capture
-        vcapture = cv2.VideoCapture(video_path)
-        width = int(vcapture.get(cv2.CAP_PROP_FRAME_WIDTH))
-        height = int(vcapture.get(cv2.CAP_PROP_FRAME_HEIGHT))
-        fps = vcapture.get(cv2.CAP_PROP_FPS)
-
-        # Define codec and create video writer
-        file_name = "splash_{:%Y%m%dT%H%M%S}.avi".format(datetime.datetime.now())
-        vwriter = cv2.VideoWriter(file_name,
-                                  cv2.VideoWriter_fourcc(*'MJPG'),
-                                  fps, (width, height))
-
-        count = 0
-        success = True
-        while success:
-            print("frame: ", count)
-            # Read next image
-            success, image = vcapture.read()
-            if success:
-                # OpenCV returns images as BGR, convert to RGB
-                image = image[..., ::-1]
-                # Detect objects
-                r = model.detect([image], verbose=0)[0]
-                # Color splash
-                splash = color_splash(image, r['masks'])
-                # RGB -> BGR to save image to video
-                splash = splash[..., ::-1]
-                # Add image to video writer
-                vwriter.write(splash)
-                count += 1
-        vwriter.release()
-    print("Saved to ", file_name)
-
-
-############################################################
-#  Training
-############################################################
-
-if __name__ == '__main__':
-    
-    # Validate arguments
-    if COMMAND_MODE == "train":
-        assert DATASET_DIR, "Argument --dataset is required for training"
-    elif COMMAND_MODE == "splash":
-        assert args.image or args.video,\
-               "Provide --image or --video to apply color splash"
-
-    print("Weights: ", BASE_WEIGHTS)
-    print("Dataset: ", DATASET_DIR)
-    print("Logs: ", WEIGHTS_DIR)
-
-    # Configurations
-    if COMMAND_MODE == "train":
-        config = DropletConfig()
-    else:
-        class InferenceConfig(DropletConfig):
-            # Set batch size to 1 since we'll be running inference on
-            # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
-            GPU_COUNT = 1
-            IMAGES_PER_GPU = 1
-        config = InferenceConfig()
-    config.display()
-
-    if USE_WANDB:
-        import wandb
-        from wandb.keras import WandbCallback
-        wandb.init(project=WANDB_PROJECT,
-                   entity=WANDB_ENTITY, config=config, group=WANDB_GROUP, name=WANDB_NAME)
-
-    # Create model
-    if COMMAND_MODE == "train":
-        model = modellib.MaskRCNN(mode="training", config=config,
-                                  model_dir=WEIGHTS_DIR, k_fold_val=K_FOLD_VAL)
-    else:
-        model = modellib.MaskRCNN(mode="inference", config=config,
-                                  model_dir=WEIGHTS_DIR, k_fold_val=K_FOLD_VAL)
-
-    # Select weights file to load
-    if BASE_WEIGHTS.lower() == "coco":
-        weights_path = COCO_WEIGHTS_PATH
-        # Download weights file
-        if not os.path.exists(weights_path):
-            utils.download_trained_weights(weights_path)
-    elif BASE_WEIGHTS.lower() == "last":
-        # Find last trained weights
-        weights_path = model.find_last()
-    elif BASE_WEIGHTS.lower() == "imagenet":
-        # Start from ImageNet trained weights
-        weights_path = model.get_imagenet_weights()
-    else:
-        weights_path = os.path.join(ROOT_DIR, "models", BASE_WEIGHTS + '.h5')
-
-    # Load weights
-    print("Loading weights ", weights_path)
-    if BASE_WEIGHTS.lower() == "coco":
-        # Exclude the last layers because they require a matching
-        # number of classes
-        model.load_weights(weights_path, by_name=True, exclude=[
-            "mrcnn_class_logits", "mrcnn_bbox_fc",
-            "mrcnn_bbox", "mrcnn_mask"])
-    else:
-        model.load_weights(weights_path, by_name=True)
-
-    custom_callbacks = []
-    
-    if USE_WANDB:
-        custom_callbacks.append(WandbCallback())
-
-    if EARLY:
-        custom_callbacks.append(tf.keras.callbacks.EarlyStopping(monitor=EARLY_LOSS, patience=EARLY))
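-        # e.g. --early_stopping=15 --early_loss=val_loss stops training once the
-        # monitored validation loss has not improved for 15 consecutive epochs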
-        
-    # Train or evaluate
-    if COMMAND_MODE == "train":
-        train(model, custom_callbacks=custom_callbacks)
-    elif COMMAND_MODE == "splash":
-        detect_and_color_splash(model, image_path=args.image,
-                                video_path=args.video)
-    else:
-        print("'{}' is not recognized. "
-              "Use 'train' or 'splash'".format(COMMAND_MODE))