#!/bin/bash
#SBATCH --job-name=spark-multi-node
# Exclusive mode is recommended for all Spark jobs
#SBATCH --exclusive
#SBATCH --nodes=4
#SBATCH --time=10

module load spark

# This command starts the Spark workers on the allocated nodes
start-spark-slurm.sh

# This variable tells the Spark example commands below where the master is
export MASTER=spark://$HOSTNAME:7077

# This is a Scala example
run-example SparkPi

# This is a Python example.
# For production jobs, you'll probably want a python module loaded;
# without one, this falls back to the system python.
spark-submit --master $MASTER $SPARK_HOME/examples/src/main/python/pi.py
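
# A minimal sketch of how this script could be submitted and monitored,
# assuming it is saved as spark-multi-node.sh (the filename is an assumption,
# not part of the original). sbatch queues the job and squeue shows its state:
#
#   sbatch spark-multi-node.sh
#   squeue -u $USER --name=spark-multi-node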