#!/bin/bash

# Slurm job options (name, compute nodes, job time).
# Exclusive single-node job: 24 single-threaded MPI/process ranks.
#SBATCH --exclusive
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=24
#SBATCH --cpus-per-task=1
#SBATCH --output=/work/ec137/ec137/kwu34/output/%j.out
#SBATCH --error=/work/ec137/ec137/kwu34/error/%j.err
#SBATCH --time=4-00:00

# Budget code (project account) for this allocation
#SBATCH --account=ec137
# We use the "standard" partition as we are running on CPU nodes
#SBATCH --partition=standard
# We use the "standard" QoS as our runtime is at most 4 days (the QoS limit)
#SBATCH --qos=standard

# --- Python environment setup ---
# User-local Python packages live under the project work directory
# (home directories are not visible on compute nodes on this system).
export PYTHONUSERBASE=/work/ec137/ec137/kwu34
export PATH=${PYTHONUSERBASE}/bin:${PATH}
#export PYTHONPATH=${PYTHONUSERBASE}/lib/python3.7/site-packages:${PYTHONPATH}
#module load python/3.7.16
# Abort immediately if the environment cannot be set up: otherwise a
# multi-day exclusive-node job would run with the wrong Python stack.
module load anaconda/python3 || exit 1
source /work/ec137/ec137/kwu34/.bashrc || exit 1
conda activate /mnt/lustre/indy2lfs/work/ec137/ec137/kwu34/py36 || exit 1
module load cmake/3.22.1 || exit 1
module load zlib || exit 1

# Change to the submission directory; quote the variable and abort if it
# is unset or inaccessible, so the job never silently runs (and writes
# its outputs) from $HOME instead.
cd "${SLURM_SUBMIT_DIR}" || exit 1

# Set the number of threads to 1
#   This prevents any threaded system libraries from automatically
#   using threading.
#export OMP_NUM_THREADS=$NSLOTS
# NOTE(review): forces the pure-Python protobuf implementation —
# presumably to work around a C-extension incompatibility in this env.
export PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python

# Launch training. OPENAI_LOG_FORMAT is scoped to this one command via
# the prefix-assignment form; the argument list is kept in an array so
# each option is readable and individually editable.
run_args=(
  --env=FishingDerbyNoFrameskip-v4
  --num_timesteps=1e9
  --save_path=./models/test
  --log_path=./logs/test
  --num_env=8
)
OPENAI_LOG_FORMAT=tensorboard,stdout,csv,log \
  python -m baselines.run_exp "${run_args[@]}"

