cluster_job_GPU_processing_template.job
#!/usr/bin/zsh
###############################################
#### Batch script for MRCNN GPU processing ####
###############################################
#### CREATE SBATCH ENTRIES ####
#### Paths and parameters must be adapted accordingly.
#### Job name
#SBATCH --job-name=<JobName>
#### Path and name of the output file of the job execution
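#### In the output file name, %x expands to the job name and %J to the job ID assigned by SLURM.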
#SBATCH --output=/home/<UserID>/.../<JobOutputFolderName>/%x_%J_output.txt
#### Job runtime
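#### For example: --time=0-12:00:00 requests a runtime limit of 12 hours (format: days-hours:minutes:seconds).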
#SBATCH --time=0-00:00:00
#### Memory requirement per GPU.
#### For example: if the value is 5 GB --> --mem-per-gpu=5G
#SBATCH --mem-per-gpu=5G
#### E-mail address
#SBATCH --mail-user=<EmailAddress>
#### Job events for which e-mails are sent
#SBATCH --mail-type=ALL
#### Number of tasks to be performed
#SBATCH --ntasks=1
#### Number of GPUs required per node
#SBATCH --gres=gpu:1
#### Definition of the job array, starting at index 0.
#### This parameter is only required if you want to run several jobs in parallel
#### from one job script, e.g. processing one testing image set with several MRCNN models (epochs).
#### In this example, we process one testing image set with 10 MRCNN models (= 10 epochs).
#### Thus, we will run 10 jobs in parallel from one job script --> array=0-9.
#SBATCH --array=0-9
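#### With --array=0-9, SLURM starts 10 array tasks; each task receives its own index (0-9)
#### in the environment variable SLURM_ARRAY_TASK_ID, which is read below.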
#### CREATE TERMINAL ENTRIES ####
#### Paths and parameters must be adapted accordingly.
#### Definition of the job parameter, which is varied
#### if several jobs are executed in parallel from one job script.
#### This job parameter is only required if you have specified the #SBATCH parameter --array above.
#### In this example, we process one testing image set with 10 MRCNN models.
#### Thus, we will run 10 jobs in parallel from one job script:
#### the parameter model selects the MRCNN model (epoch) used by the current job
#### and takes a different value in each array task.
model="$SLURM_ARRAY_TASK_ID"
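#### Example: in array task 3, $SLURM_ARRAY_TASK_ID is 3, so model=3 and the weights file
#### model_03 is selected by the python call below (--weights_name=model_0"$model").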
#### Load the CUDA module
module load cuda/10.0
#### Add the directory in which Anaconda is located to the PATH
export PATH=$PATH:/home/<UserID>/anaconda3/bin
#### Activate environment
source activate env_mrcnn_gpu
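#### Note: depending on the Anaconda version, "conda activate env_mrcnn_gpu" may be required
#### instead of "source activate".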
#### Navigate to the path where the droplet.py script is located
cd /home/<UserID>/.../samples/droplet/
#### Run the process_automated_droplet.py script.
#### The call below specifies the required processing parameters, together with the additional
#### parameters needed when running several jobs in parallel from one job script.
#### In this example, we process one testing image set with 10 MRCNN models.
#### Thus, 10 jobs are executed in parallel (#SBATCH --array=0-9).
#### In each job the job parameter model is varied, starting with 0 and ending with 9.
#### The model names are model_00 to model_09.
#### First, we specify the processing parameter weights_name accordingly (--weights_name=model_0"$model").
#### Moreover, the output folder and Excel result file names, defined by the processing parameters
#### save_path and name_result_file, are suffixed in the same way, since we need 10 of each.
#### Optional processing parameters can be found below.
#### For descriptions and default settings of all processing parameters, see the manual.
python process_automated_droplet.py \
    --dataset_path=<InputFolderName> \
    --save_path=<OutputFolderName>_0"$model" \
    --name_result_file=<ExcelFileName>_0"$model" \
    --weights_path=<WeightsFolderName> \
    --weights_name=model_0"$model" \
    --file_format=<FileFormat> \
    --device=<Boolean> \
    --pixelsize=<Double> \
    --image_max=<Integer>
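#### Example (hypothetical placeholder values) of how the call resolves for array task 3:
#### python process_automated_droplet.py --dataset_path=test_images --save_path=results_03 \
####     --name_result_file=results_03 --weights_path=weights --weights_name=model_03 \
####     --file_format=png --device=True --pixelsize=3.1 --image_max=1024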
#### Optional processing parameters:
#### --masks
#### --save_nth_image
#### --image_crop
#### --images_gpu
#### --confidence
#### --detect_reflections
#### --detect_oval_droplets
#### --min_aspect_ratio
#### --detect_adhesive_droplets
#### --save_coordinates
#### --min_velocity
#### --min_size_diff
#### --n_images_compared
#### --n_adhesive_high
#### --n_adhesive_low
#### --low_distance_threshold
#### --edge_tolerance
#### --contrast
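#### Optional parameters are appended to the python call above, e.g. (hypothetical values,
#### assuming the same --name=value form as the required parameters): --save_nth_image=10 --confidence=0.9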