#!/bin/bash
#SBATCH --job-name=bench_cluster
#SBATCH --time=00:59:00
#SBATCH --partition=hopper-prod
#SBATCH --nodes=2
#SBATCH --gres=gpu:8
#SBATCH --qos=high
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=96
#SBATCH --exclusive
#SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-2_tp-1_pp-8_mbz-2/log.out
#SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-2_tp-1_pp-8_mbz-2/log.out
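
# Benchmark point: llama-1B on 16 GPUs (2 nodes x 8 GPUs), dp=2 / tp=1 / pp=8, micro-batch size 2
# (as encoded in the results path above); stdout and stderr are both written to log.out.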
# Function to update status based on squeue output
update_status() {
    job_id=$1
    status_file=$2
    # For unknown reasons, this doesn't update the status for PENDING jobs;
    # in practice the status file is only written once the job is RUNNING.
    while true; do
        job_status=$(squeue --job "$job_id" --noheader --format=%T)
        echo "Job status: $job_status"
        if [ -z "$job_status" ]; then
            # Job has finished or is no longer in the queue
            break
        elif [ "$job_status" = "RUNNING" ]; then
            printf "running" > "$status_file"
            break
        fi
        sleep 10
    done
}
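
# The function above is started in the background right before srun (see below) with this
# job's id and status file, and exits once the job is RUNNING or has left the queue.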
# Misc initializations.
echo "========================"
echo "START TIME: $(date)"
source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
conda activate /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster
echo "python3 version = $(python3 --version)"
echo "========================"
# Slurm / distributed setup: expose the node list, use the first node as the rendezvous host,
# and pick a random port in the unprivileged range (1024-65534).
export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
export MASTER_PORT=$((1024 + RANDOM % 64511))
export TMPDIR=/scratch
export HF_DATASETS_CACHE="/admin/home/ferdinand_mom/.cache"
export CUBLAS_WORKSPACE_CONFIG=":4096:8"  # deterministic cuBLAS workspace (reproducibility)
export CUDA_DEVICE_MAX_CONNECTIONS="1"    # single device work queue, needed to overlap communication with compute in Megatron-style parallelism
huggingface-cli login --token $HUGGINGFACE_TOKEN
NANOTRON_REPO="/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron"
CMD="$NANOTRON_REPO/run_train.py --config-file /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-2_tp-1_pp-8_mbz-2/config.yaml"
LAUNCHER="torchrun \
--nproc_per_node 8 \
--nnodes 2 \
--rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} \
--rdzv_backend c10d \
--max_restarts 0 \
--tee 3 \
--node_rank ${SLURM_PROCID}"
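# Note: ${SLURM_PROCID} is expanded here, when LAUNCHER is defined in the batch step, not once
# per srun task. With the c10d rendezvous backend the ranks are assigned at rendezvous time
# rather than from --node_rank, so the 16-rank world should still form correctly.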
# Checkout the bench_cluster branch
cd $NANOTRON_REPO
git checkout bench_cluster
cd ..
# Get the current job ID
job_id=${SLURM_JOB_ID}
# Watch the queue in the background and flip status.txt to "running" once the job starts
update_status $job_id /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-2_tp-1_pp-8_mbz-2/status.txt &
# Run the main command: srun starts one torchrun per node (--ntasks-per-node=1), each spawning 8 workers
srun -u $LAUNCHER $CMD
exit_status=$?
# Update status based on the exit status of `srun`
if [ $exit_status -eq 0 ]; then
    printf "completed" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-2_tp-1_pp-8_mbz-2/status.txt
else
    if grep -q "OutOfMemoryError" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-2_tp-1_pp-8_mbz-2/log.out; then
        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-2_tp-1_pp-8_mbz-2/status.txt
    elif grep -q " CUDA error: an illegal memory access" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-2_tp-1_pp-8_mbz-2/log.out; then
        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-2_tp-1_pp-8_mbz-2/status.txt
    elif grep -q "Timeout at NCCL" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-2_tp-1_pp-8_mbz-2/log.out; then
        printf "timeout" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-2_tp-1_pp-8_mbz-2/status.txt
    else
        printf "fail" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-2_tp-1_pp-8_mbz-2/status.txt
    fi
fi
# Run the report script if the job completed successfully
if [ $exit_status -eq 0 ]; then
    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-2_tp-1_pp-8_mbz-2 --is_logs
    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-2_tp-1_pp-8_mbz-2 --is_profiler
fi
# Push the results folder to the Hugging Face Hub with huggingface-cli
huggingface-cli upload nanotron/bench_cluster /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-2_tp-1_pp-8_mbz-2 llama-1B/16_GPUS/dp-2_tp-1_pp-8_mbz-2 --commit-message "Upload llama-1B/16_GPUS/dp-2_tp-1_pp-8_mbz-2"
# Verify the upload
if [ $? -eq 0 ]; then
    echo "Uploading to Huggingface Hub successful"
else
    echo "Failed to upload to Huggingface Hub"
fi