From 1ae7514dea69eb36ef4640f3b753ebfbb678438e Mon Sep 17 00:00:00 2001
From: ssibirtsev <sibi_ballad@gmx.de>
Date: Wed, 15 Nov 2023 14:55:29 +0100
Subject: [PATCH] Upload New File

---
 classes/droplet/proces_manual_droplet.py | 344 +++++++++++++++++++++++
 1 file changed, 344 insertions(+)
 create mode 100644 classes/droplet/proces_manual_droplet.py

diff --git a/classes/droplet/proces_manual_droplet.py b/classes/droplet/proces_manual_droplet.py
new file mode 100644
index 0000000..ade37c0
--- /dev/null
+++ b/classes/droplet/proces_manual_droplet.py
@@ -0,0 +1,344 @@
+"""
+MRCNN Particle Detection
+Determine droplet diameters from manual processing.
+
+The source code of "MRCNN Particle Detection" (https://git.rwth-aachen.de/avt-fvt/private/mrcnn-particle-detection) 
+is based on the source code of "Mask R-CNN" (https://github.com/matterport/Mask_RCNN).
+
+The source code of "Mask R-CNN" is licensed under the MIT License (MIT).
+Copyright (c) 2017 Matterport, Inc.
+Written by Waleed Abdulla
+
+All source code modifications to the source code of "Mask R-CNN" in "MRCNN Particle Detection" 
+are licensed under the Eclipse Public License v2.0 (EPL 2.0).
+Copyright (c) 2022-2023 Fluid Process Engineering (AVT.FVT), RWTH Aachen University
+Edited by Stepan Sibirtsev, Mathias Neufang & Jakob Seiler
+
+The copyrights and license terms are given in LICENSE.
+
+Ideas and small code snippets were adapted from these sources:
+https://github.com/mat02/Mask_RCNN
+"""  
+
+### --------------------------- ###
+### Input processing parameters ###
+### --------------------------- ###
+
+# Is the script executed on a cluster,
+# e.g., the RWTH High Performance Computing cluster?
+# True=yes, False=no
+cluster=False
+
+### Please specify only for non-cluster evaluations 
+
+# Input dataset folder located in path: "...\datasets\input\..."
+dataset_path="test_input"  
+# Output images folder located in path: "...\datasets\output\..."
+save_path="test_output" 
+# Name of the excel output file located in path: "...\datasets\output\..."
+name_result_file="DSD" 
+# Generate detection masks? 
+# True=yes, False=no
+masks=False
+# Save every n-th result image
+save_nth_image=1
+# Pixel size in [µm/px].
+# To read the pixel size from the Sopat log file, enter pixelsize=0
+# (Sopat generates a JSON file that includes this information).
+pixelsize=1
+
+### Specifications for filters
+
+# Detect and mark oval droplets? 
+# Detected oval droplets are excluded from the evaluation
+# and do not appear in the Excel output file.
+# They are marked in red.
+# True=yes, False=no
+detect_oval_droplets=False
+# Minimum aspect ratio: filter for elliptical shapes
+min_aspect_ratio=0.9
+# Edge tolerance: filter for droplets intersecting the image border.
+# Droplets intersecting the image border are marked in red.
+edge_tolerance=0.01
+
+### ----------------------------------- ###
+###             Initialization          ###
+### ----------------------------------- ###
+
+import os
+import sys
+import itertools
+import math
+import logging
+import json
+import re
+import random
+import cv2
+import pandas as pd
+from collections import OrderedDict
+import numpy as np
+import skimage.draw
+import skimage.io
+import matplotlib
+import matplotlib.pyplot as plt
+import matplotlib.patches as patches
+import matplotlib.lines as lines
+from matplotlib.patches import Polygon
+from pathlib import Path
+
+
+# Root directory of the project
+if cluster is False:
+    ROOT_DIR = os.path.abspath("")
+    DATASET_DIR = os.path.join(ROOT_DIR, "datasets\\input", dataset_path)
+    SAVE_DIR = os.path.join(ROOT_DIR, "datasets\\output", save_path)
+    EXCEL_DIR = os.path.join(SAVE_DIR, name_result_file + '.xlsx')
+    MASKS = masks
+    PIXELSIZE = pixelsize
+    EDGE_TOLERANCE = edge_tolerance
+    MIN_ASPECT_RATIO = min_aspect_ratio
+    DETECT_OVAL_DROPLETS = detect_oval_droplets
+    SAVE_NTH_IMAGE = save_nth_image
+else:
+    import argparse
+    # Parse command line arguments
+    parser = argparse.ArgumentParser(
+        description='evaluation on cluster')
+    parser.add_argument('--dataset_path', required=True,
+                        help=r'Dataset path to find in Mask_R_CNN\datasets\input')
+    parser.add_argument('--save_path', required=False, default="test_output",
+                        help=r'Save path to find in Mask_R_CNN\datasets\output')
+    parser.add_argument('--name_result_file', required=False, default="DSD",
+                        help=r'Name of the Excel result file to find in Mask_R_CNN\datasets\output')
+    parser.add_argument('--detect_oval_droplets', required=False,
+                        default=False,
+                        help="")
+    parser.add_argument('--pixelsize', required=True, 
+                        help="")
+    parser.add_argument('--save_nth_image', required=False,
+                        default=1,
+                        help="")                        
+    parser.add_argument('--min_aspect_ratio', required=False,
+                        default=0.9,
+                        help="")
+    parser.add_argument('--edge_tolerance', required=False,
+                        default=0.01,
+                        help="")
+    parser.add_argument('--masks', required=False,
+                        default=False,
+                        help='Generate detection masks?')
+    args = parser.parse_args()
+    ROOT_DIR = os.path.join("/rwthfs/rz/cluster", os.path.abspath("../.."))
+    DATASET_DIR = os.path.join(ROOT_DIR, "datasets/input", args.dataset_path)
+    SAVE_DIR = os.path.join(ROOT_DIR, "datasets/output", args.save_path)
+    EXCEL_DIR = os.path.join(SAVE_DIR, args.name_result_file + '.xlsx')
+    # argparse delivers strings: convert them to the proper types.
+    # Note that bool("False") would evaluate to True, hence the explicit string comparison.
+    MASKS = str(args.masks).lower() in ("true", "1", "yes")
+    PIXELSIZE = float(args.pixelsize)
+    EDGE_TOLERANCE = float(args.edge_tolerance)
+    MIN_ASPECT_RATIO = float(args.min_aspect_ratio)
+    DETECT_OVAL_DROPLETS = str(args.detect_oval_droplets).lower() in ("true", "1", "yes")
+    SAVE_NTH_IMAGE = int(args.save_nth_image)
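+    # Example cluster invocation (a sketch only; adjust the script path and values
+    # to your setup, all flags are optional except --dataset_path and --pixelsize):
+    #   python proces_manual_droplet.py --dataset_path test_input --pixelsize 1 \
+    #       --save_path test_output --name_result_file DSD --save_nth_image 1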
+
+# Read the pixel size from the Sopat JSON log file (if the input data is from a Sopat measurement)
+if PIXELSIZE == 0:
+    sopat_name = os.path.join(DATASET_DIR, 'Sopat_Log.json')
+    sopat_name_new = os.path.join(DATASET_DIR, 'Sopat_Log_New.json')
+
+    with open(sopat_name, "r", encoding="utf-8") as sopat_content:
+        content_lines = sopat_content.readlines()
+
+    # Write a copy of the log with line 30 removed, then parse that copy
+    current_line = 1
+    with open(sopat_name_new, "w", encoding="utf-8") as sopat_content_new:
+        for line in content_lines:
+            if current_line != 30:
+                sopat_content_new.write(line)
+            current_line += 1
+    with open(sopat_name_new, "r", encoding="utf-8") as sopat_content_new:
+        sopat_data = json.load(sopat_content_new)
+    PIXELSIZE = sopat_data["sopatCamControlAcquisitionLog"]["conversionMicronsPerPx"]
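+    # For reference, the lookup above assumes the Sopat log contains a structure like
+    # the following (illustrative excerpt, the value is a placeholder):
+    #   {"sopatCamControlAcquisitionLog": {"conversionMicronsPerPx": 0.35, ...}}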
+
+# Import Mask RCNN
+sys.path.append(ROOT_DIR)  # To find local version of the library
+Path(SAVE_DIR).mkdir(parents=True, exist_ok=True)
+from mrcnn import utils
+from mrcnn import visualize
+from mrcnn.visualize import display_images
+import mrcnn.model as modellib
+from mrcnn.model import log
+from mrcnn.config import Config
+
+class DropletConfig(Config):
+    """Configuration for training on the toy  dataset.
+    Derives from the base Config class and overrides some values.
+    """
+    # Give the configuration a recognizable name
+    NAME = "droplet"
+
+    # NUMBER OF GPUs to use. When using only a CPU, this needs to be set to 1.
+    GPU_COUNT = 1
+
+    # Generate detection masks
+    #     False: Output only bounding boxes like in Faster-RCNN
+    #     True: Generate masks as in Mask-RCNN
+    GENERATE_MASKS = MASKS
+
+# Configurations
+config = DropletConfig()
+
+class DropletDataset(utils.Dataset):
+
+    def load_droplet(self, dataset_dir, subset):
+        """Load a subset of the Droplet dataset.
+        dataset_dir: Root directory of the dataset.
+        subset: unused here, kept for interface compatibility
+        """
+        # Add classes. We have only one class to add.
+        self.add_class("droplet", 1, "droplet")
+
+        # We mostly care about the x and y coordinates of each region
+        # Note: In VIA 2.0, regions was changed from a dict to a list.
+        with open(os.path.join(dataset_dir, "test.json")) as annotation_file:
+            annotations = json.load(annotation_file)
+        annotations = list(annotations.values())  # don't need the dict keys
+        # The VIA tool saves images in the JSON even if they don't have any
+        # annotations. Skip unannotated images.
+        annotations = [a for a in annotations if a['regions']]
+        # Add images
+        for a in annotations:
+            # Get the x, y coordinates of the points of the polygons that make up
+            # the outline of each object instance. These are stored in the
+            # shape_attributes of the VIA annotation format.
+            # The if condition is needed to support VIA versions 1.x and 2.x.
+            if type(a['regions']) is dict:
+                polygons = [r['shape_attributes']
+                            for r in a['regions'].values()]
+            else:
+                polygons = [r['shape_attributes'] for r in a['regions']]
+
+            # load_mask() needs the image size to convert polygons to masks.
+            # Unfortunately, VIA doesn't include it in the JSON, so we must read
+            # the image. This is only manageable since the dataset is tiny.
+            image_path = os.path.join(dataset_dir, a['filename'])
+            image = skimage.io.imread(image_path)
+            height, width = image.shape[:2]
+
+            self.add_image(
+                "droplet",
+                image_id=a['filename'],  # use file name as a unique image id
+                path=image_path,
+                width=width, height=height,
+                polygons=polygons)
+
+    def load_mask(self, image_id):
+        """Generate instance masks for an image.
+       Returns:
+        masks: A bool array of shape [height, width, instance count] with
+            one mask per instance.
+        class_ids: a 1D array of class IDs of the instance masks.
+        """
+        # If not a droplet dataset image, delegate to parent class.
+        image_info = self.image_info[image_id]
+        if image_info["source"] != "droplet":
+            return super(self.__class__, self).load_mask(image_id)
+
+        # Convert polygons to a bitmap mask of shape
+        # [height, width, instance_count]
+        info = self.image_info[image_id]
+        mask = np.zeros([info["height"], info["width"], len(info["polygons"])],
+                        dtype=np.uint8)
+
+        for i, p in enumerate(info["polygons"]):
+            # Get indexes of pixels inside the polygon and set them to 1
+            if p['name'] == 'polygon':
+                rr, cc = skimage.draw.polygon(
+                    p['all_points_y'], p['all_points_x'])
+
+            elif p['name'] == 'ellipse':
+                rr, cc = skimage.draw.ellipse(
+                    p['cy'], p['cx'], p['ry'], p['rx'])
+            else:
+                # Note: in newer scikit-image versions, skimage.draw.circle has been replaced by skimage.draw.disk
+                rr, cc = skimage.draw.circle(p['cy'], p['cx'], p['r'])
+            # Discard pixel coordinates that fall outside the image boundaries
+            valid = (rr > 0) & (rr < info["height"]) & (cc > 0) & (cc < info["width"])
+            rr, cc = rr[valid], cc[valid]
+
+            if len(rr) == 0 or len(cc) == 0:
+                continue
+            mask[rr, cc, i] = 1
+
+        # Return mask, and array of class IDs of each instance. Since we have
+        # one class ID only, we return an array of 1s
+        return mask.astype(bool), np.ones([mask.shape[-1]], dtype=np.int32)
+
+    def image_reference(self, image_id):
+        """Return the path of the image."""
+        info = self.image_info[image_id]
+        if info["source"] == "droplet":
+            return info["path"]
+        else:
+            return super(self.__class__, self).image_reference(image_id)
+
+dataset = DropletDataset()
+dataset.load_droplet(DATASET_DIR, None)
+dataset.prepare()
+
+def get_ax(rows=1, cols=1, size=8):
+    """Return a Matplotlib Axes array to be used in
+    all visualizations in the notebook. Provide a
+    central point to control graph sizes.
+    Adjust the size attribute to control how big to render images"""
+    _, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))
+    return ax
+
+# Load each image, determine the droplet diameters, and save annotated result images
+mean_diameter_total = []
+for image_id in dataset.image_ids:
+    image_width = dataset.image_info[image_id]['width']
+    image_height = dataset.image_info[image_id]['height']
+    image_name = dataset.image_info[image_id]['id']
+    image = dataset.load_image(image_id)
+    mask, class_ids = dataset.load_mask(image_id)
+    bbox = utils.extract_bboxes(mask)
+    colors = []
+    diameter_vis_list = []
+    for i in range(len(bbox)):
+        bbox_delta = (abs(bbox[i][1] - bbox[i][3]), abs(bbox[i][0] - bbox[i][2]))
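+        # Presumably the volume-equivalent sphere diameter of a spheroid spanned by the
+        # bounding box: two axes equal to the bbox width, one equal to the bbox height,
+        # i.e. d = ((height+1) * (width+1)^2)^(1/3) in pixels.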
+        diameter = ((bbox_delta[1]+1)*(bbox_delta[0]+1)**2)**(1/3)
+        #diameter = abs((bbox_delta[0] + bbox_delta[1] + 2) / 2)
+        diameter_vis_list.append(round(diameter * PIXELSIZE / 1000,3))
+        # Exclude droplets whose bounding box intersects the image border (within
+        # EDGE_TOLERANCE); if DETECT_OVAL_DROPLETS is False, additionally exclude
+        # droplets whose bounding box violates the allowed aspect ratio.
+        exclude = (
+            bbox[i][0] <= image_height*EDGE_TOLERANCE or bbox[i][1] <= image_width*EDGE_TOLERANCE or
+            bbox[i][2] >= image_height*(1-EDGE_TOLERANCE) or bbox[i][3] >= image_width*(1-EDGE_TOLERANCE)
+        )
+        if DETECT_OVAL_DROPLETS is False:
+            # checks if bbox is within allowed aspect ratio
+            exclude = exclude or (
+                bbox_delta[0] / bbox_delta[1] >= 1 / MIN_ASPECT_RATIO or
+                bbox_delta[0] / bbox_delta[1] <= MIN_ASPECT_RATIO
+            )
+        # Excluded droplets are marked red and omitted from the diameter list;
+        # accepted droplets are marked green.
+        if exclude:
+            colors.append((1, 0, 0))
+        else:
+            colors.append((0, 1, 0))
+            mean_diameter_total.append(diameter)
+    ax = get_ax(1)
+    if config.GENERATE_MASKS is True:
+        visualize.display_instances(image, bbox, mask, class_ids, dataset.class_names, ax=ax, colors=colors, captions=diameter_vis_list,
+                                    save_dir=SAVE_DIR, img_name=image_name, save_img=True, number_saved_images=SAVE_NTH_IMAGE)
+    else:
+        visualize.display_instances(image, bbox, None, class_ids, dataset.class_names, ax=ax, show_mask=False, colors=colors, captions=diameter_vis_list,
+                                    title=None, save_dir=SAVE_DIR, img_name=image_name, save_img=True, number_saved_images=SAVE_NTH_IMAGE, counter_1=SAVE_NTH_IMAGE )
+    plt.close()
+
+### Write the droplet diameters to the Excel output file
+pd.DataFrame(mean_diameter_total).to_excel(EXCEL_DIR, header=False, index=False)
\ No newline at end of file
-- 
GitLab