...
### Code Block
#!/bin/bash
#SBATCH -q np
#SBATCH -N 3
#SBATCH --hint=nomultithread
#SBATCH -J test-coupled
#SBATCH -o test-coupled-%j.out
#SBATCH -e test-coupled-%j.out

# Coupled-model affinity test: run three (simulated) executables as one
# heterogeneous Slurm job step, each with its own MPI-task / OpenMP-thread
# layout, and print the resulting CPU pinning via the NERSC "xthi" checker.

# Per-executable layout: mpi_tasks_<exe> MPI ranks, omp_threads_<exe> threads.
executables="exe1 exe2 exe3"
mpi_tasks_exe1=128
omp_threads_exe1=1
mpi_tasks_exe2=64
omp_threads_exe2=2
mpi_tasks_exe3=32
omp_threads_exe3=4

set -e

# Load environment
module load hpcx-openmpi

# Fetch and build affinity checker xthi.
# Tweak it slightly so each symlinked copy reports its own name (argv[0])
# instead of a generic "Hello from", letting us tell the executables apart.
if [[ ! -e xthi.c ]]; then
    wget https://docs.nersc.gov/jobs/affinity/xthi.c
fi
sed -i -e "s/Hello from/%s/" -e "s/ rank, thread/ argv[0], rank, thread/" xthi.c
mpicc -fopenmp -o xthi xthi.c

# Build the heterogeneous srun argument list as an array (safe expansion,
# no manual word-splitting or leading-separator stripping needed).
srun_args=()
for e in $executables; do
    # Simulate different executables
    ln -sf xthi "$e"
    # Configure srun step for this executable via indirect expansion
    mpi_tasks_var=mpi_tasks_$e
    omp_threads_var=omp_threads_$e
    # Separate heterogeneous components with ":" (not before the first one)
    (( ${#srun_args[@]} > 0 )) && srun_args+=(":")
    srun_args+=(-n "${!mpi_tasks_var}" -c "${!omp_threads_var}" "./$e")
done

# Ensure OpenMP correct pinning
export OMP_PLACES=threads

# Avoid PMI hangs with heterogeneous jobs
export SLURM_MPI_TYPE=none

# Use srun in heterogeneous steps mode.
# Each executable will have a different layout, minimum allocation is one node for each.
# See https://slurm.schedmd.com/heterogeneous_jobs.html#het_steps
srun "${srun_args[@]}" | sort -k 3,3n -k 5,5n
...