#!/usr/bin/zsh

### SLURM batch script: run fine-tuned Llama inference on the RWTH cluster.
### NOTE(review): GitLab web-UI artifacts ("Skip to content", avatar/committed
### blame lines) were interleaved in the pasted file; they have been removed.
### They would have been executed as commands AND stopped SLURM from parsing
### the #SBATCH directives that followed the first non-comment line.

### Add basic configuration for job
#SBATCH --account=rwth1776
#SBATCH --job-name=llama_inference
#SBATCH --output=logs/llama_inference_%j.log
### Fixed: added "_" before %j to match the output-log naming scheme.
#SBATCH --error=logs/llama_inference_error_%j.log
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=3
#SBATCH --gres=gpu:1
#SBATCH --time=05:00:00

### Fail fast: abort on any command error or failed pipeline stage.
### (-u is deliberately omitted: venv "activate" and Lmod functions may
### reference unset variables and would abort the job under nounset.)
set -e -o pipefail

###------------------------------------------------------------------------------------------------------------------------------

### Run the project in work directory of the cluster (configure based on need!!
### RWTH File System : https://help.itc.rwth-aachen.de/en/service/rhr4fjjutttf/article/da307ec2c60940b29bd42ac483fc3ea7/
### Quoted and merged into one cd; abort explicitly so inference never runs
### from the wrong directory if $HPCWORK is missing.
cd "$HPCWORK/codebud/inference" || exit 1
###------------------------------------------------------------------------------------------------------------------------------

### JOB SCRIPT RUN
module load GCCcore/.13.2.0
module load Python/3.11.5
module load CUDA

### Activate the project virtual environment (path relative to inference dir).
source ../../venvs/codebud/bin/activate
echo "$VIRTUAL_ENV"

python --version

python llama_finetuned_inference.py

### Tear the environment down in reverse order of setup.
module unload CUDA
module unload Python/3.11.5

deactivate
echo "Script ran successfully"