Satori Cluster
The Satori user documentation is available at https://mit-satori.github.io/; this page focuses on running ClimateMachine.jl on Satori. If you run into trouble or have questions, contact either @vchuravy or @christophernhill.
To set up, load the required modules and clone ClimateMachine.jl into your scratch space under `/nobackup`:

```bash
module load spack git
module load julia/1.3.0 cuda/10.2
module use /home/cnh/modules
module load spectrum_mpi/10.3.0.00rtm0
export HOME2=/nobackup/users/`whoami`/
cd ${HOME2}
git clone https://github.com/CliMA/ClimateMachine.jl ClimateMachine
```
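As an optional sanity check (not part of the original setup, just a suggestion), confirm that the loaded modules put the toolchain on the `PATH`:

```bash
# Optional: verify the loaded modules and tools
module list
julia --version
which mpiexec nvcc
```

The following example SLURM batch script runs the DYCOMS LES experiment on a single node with 4 MPI ranks and 4 GPUs. Save it to a file and submit it with `sbatch`, as shown after the script (the file name used there is only an illustration).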
```bash
#!/bin/bash
# Begin SLURM Directives
#SBATCH --job-name=ClimateMachine
#SBATCH --time=30:00
#SBATCH --ntasks=4
#SBATCH --ntasks-per-node=4
#SBATCH --gres="gpu:4" # GPUs per Node
#SBATCH --cpus-per-task=4
# Clear the environment from any previously loaded modules
module purge > /dev/null 2>&1
module use /home/cnh/modules
module load spack git
module load julia/1.3.0 cuda/10.2
module load spectrum_mpi/10.3.0.00rtm0
export HOME2=/nobackup/users/`whoami`/
export JULIA_PROJECT=${HOME2}/ClimateMachine
export JULIA_DEPOT_PATH=${HOME2}/julia_depot
# Use the system (Spectrum) MPI instead of a bundled MPI binary
export JULIA_MPI_BINARY=system
# Use the system CUDA installation instead of downloading CUDA artifacts
export JULIA_CUDA_USE_BINARYBUILDER=false
# Instantiate, build, and precompile the project (output suppressed)
julia -e 'using Pkg; pkg"instantiate"; pkg"build"; pkg"precompile"' > /dev/null 2>&1
# Reset `CUDA_VISIBLE_DEVICES` so that every rank sees all four GPUs.
# This is needed to take advantage of faster local CUDA-aware communication.
# Each rank must then select the right GPU inside the application.
cat > launch.sh << EoF_s
#! /bin/sh
export CUDA_VISIBLE_DEVICES=0,1,2,3
exec \$*
EoF_s
chmod +x launch.sh
EXPERIMENT=${HOME2}/ClimateMachine/experiments/AtmosLES/dycoms.jl
# Note: when using the OpenMPI module instead of Spectrum MPI, launch with `srun --mpi` instead of `mpiexec`
mpiexec ./launch.sh julia ${EXPERIMENT} --output-dir=${HOME2}/clima-${SLURM_JOB_ID}
```
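To submit and monitor the job (the file name `climatemachine.sbatch` below is just an example; use whatever name you saved the script under):

```bash
# Submit the batch script and check its status in the queue
sbatch climatemachine.sbatch
squeue -u `whoami`
# Results are written to ${HOME2}/clima-<jobid>, as set by --output-dir above
```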