From 1df7b1580550fcf401b8f97e0079dd23f0bc90a2 Mon Sep 17 00:00:00 2001
From: ssibirtsev <sibi_ballad@gmx.de>
Date: Wed, 15 Nov 2023 14:48:57 +0100
Subject: [PATCH] Upload New File

---
 manual/cluster_job_CPU_training_template.job | 81 ++++++++++++++++++++
 1 file changed, 81 insertions(+)
 create mode 100644 manual/cluster_job_CPU_training_template.job

diff --git a/manual/cluster_job_CPU_training_template.job b/manual/cluster_job_CPU_training_template.job
new file mode 100644
index 0000000..f4df59c
--- /dev/null
+++ b/manual/cluster_job_CPU_training_template.job
@@ -0,0 +1,81 @@
+#!/usr/bin/zsh
+
+##############################################
+##### Batch script for the MRCNN training ####
+##############################################
+
+#### CREATE SBATCH ENTRIES ####
+#### Adapt all paths and parameters to your setup.
+
+#### Job name 
+#SBATCH --job-name=<JobName>
+
+#### Path and name of the job output file (%x = job name, %J = job ID) 
+#SBATCH --output=/home/<UserID>/.../<JobOutputFolderName>/%x_%J_output.txt
+
+#### Job runtime, determined by test jobs on the GPU node (see manual). 
+#### Multiply the computing time per epoch from the test by the number of epochs to be trained. 
+#### Add a safety factor, e.g. multiply by 1.2; see the example below. 
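+#### Example with hypothetical values: 12 min per epoch * 50 epochs = 600 min, 
+#### times a safety factor of 1.2 = 720 min = 12 h --> --time=0-12:00:00 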
+#SBATCH --time=0-00:00:00
+
+#### Memory requirement per CPU, determined by test jobs on the GPU node (see manual). 
+#### Add a safety factor, e.g. multiply by 1.2. 
+#### For example: if the resulting value is 5 GB, set --mem-per-cpu=5G 
+#SBATCH --mem-per-cpu=5G
+
+#### E-mail address 
+#SBATCH --mail-user=<EmailAddress>
+
+#### Types of e-mail notifications to receive 
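+#### ALL covers BEGIN, END, FAIL and REQUEUE; individual types can be set instead. 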
+#SBATCH --mail-type=ALL
+
+#### Number of tasks to be performed 
+#SBATCH --ntasks=1
+
+#### CREATE TERMINAL ENTRIES ####
+#### Adapt all paths and parameters to your setup 
+
+#### Add the Anaconda installation to the PATH 
+export PATH=$PATH:/home/<UserID>/anaconda3/bin
+
+#### Activate environment 
+source activate env_mrcnn_cpu
+
+#### Navigate to the directory where the train_droplet.py script is located
+cd /home/<UserID>/.../samples/droplet/
+
+#### Run the train_droplet.py script.
+#### The call below specifies all required training parameters.
+#### Optional training parameters are listed further down. 
+#### See the manual for descriptions and default settings of all training parameters. 
+python train_droplet.py --dataset_path=<TrainValidationFolderName> --file_format=<FileFormat> --image_max=<Integer> --images_gpu=<Integer> --device=False 
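+
+#### Example call with hypothetical values (adapt paths and values to your setup, see manual): 
+#### python train_droplet.py --dataset_path=<TrainValidationFolderName> --file_format=jpg --image_max=1024 --images_gpu=1 --device=False 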
+
+#### Optional training parameters: 
+#### --name_result_file  
+#### --new_weights_path
+#### --base_weights 
+#### --train_all_layers 
+#### --masks 
+#### --dataset_quantity 
+#### --cross_validation
+#### --k_fold
+#### --k_fold_val
+#### --epochs 
+#### --early_stopping 
+#### --early_loss 
+#### --use_wandb 
+#### --wandb_entity 
+#### --wandb_project 
+#### --wandb_group 
+#### --wandb_run 
+#### --backbone_type 
+#### --learning
+#### --momentum
+#### --w_decay
+#### --augmentation 
+#### --flip
+#### --cropandpad
+#### --rotate
+#### --noise
+#### --gamma
+#### --contrast 
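+
+#### To use optional parameters, append them to the python call above, e.g. 
+#### (hypothetical values, see the manual for valid values): 
+#### python train_droplet.py --dataset_path=<TrainValidationFolderName> --file_format=<FileFormat> --image_max=<Integer> --images_gpu=<Integer> --device=False --epochs=100 --early_stopping=True 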
\ No newline at end of file
-- 
GitLab