3outeille (HF staff) committed on
Commit
825f397
1 Parent(s): a3838bb

Upload llama-1B/8_GPUS/dp-1_tp-4_pp-2_mbz-2

llama-1B/8_GPUS/dp-1_tp-4_pp-2_mbz-2/bench.slurm ADDED
@@ -0,0 +1,111 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_cluster
+ #SBATCH --time=02:00:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --nodes=1
+ #SBATCH --gres=gpu:8
+ #SBATCH --qos=normal
+ #SBATCH --ntasks-per-node=1
+ #SBATCH --cpus-per-task=96
+ #SBATCH --exclusive
+ #SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-4_pp-2_mbz-2/log.out
+ #SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-4_pp-2_mbz-2/log.out
+
+ # Function to update the status file based on squeue output
+ update_status() {
+     job_id=$1
+     status_file=$2
+     # For unknown reasons, this doesn't update the status for pending jobs; it only works for running ones.
+     while true; do
+         job_status=$(squeue --job $job_id --noheader --format=%T)
+         echo "Job status: $job_status"
+         if [ -z "$job_status" ]; then
+             # Job has finished or is not found
+             break
+         elif [ "$job_status" = "RUNNING" ]; then
+             printf "running" > $status_file
+             break
+         fi
+         sleep 10
+     done
+ }
+
+ # Misc initializations.
+ echo "========================"
+ echo "START TIME: $(date)"
+ source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
+ conda activate /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster
+ echo "python3 version = $(python3 --version)"
+ echo "========================"
+
+ # Slurm stuff
+ export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
+ export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
+ export MASTER_PORT=$((1024 + RANDOM % 64511))
+
+ export TMPDIR=/scratch
+ export HF_DATASETS_CACHE="/admin/home/ferdinand_mom/.cache"
+ export CUBLAS_WORKSPACE_CONFIG=":4096:8"
+ export CUDA_DEVICE_MAX_CONNECTIONS="1"
+
+ huggingface-cli login --token $HUGGINGFACE_TOKEN
+
+ NANOTRON_REPO="/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron"
+ CMD="$NANOTRON_REPO/run_train.py --config-file /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-4_pp-2_mbz-2/config.yaml"
+
+ LAUNCHER="torchrun \
+     --nproc_per_node 8 \
+     --nnodes 1 \
+     --rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} \
+     --rdzv_backend c10d \
+     --max_restarts 0 \
+     --tee 3 \
+     --node_rank ${SLURM_PROCID}"
+
+ # Check out the bench_cluster branch
+ cd $NANOTRON_REPO
+ git checkout bench_cluster
+ cd ..
+
+ # Get the current job ID
+ job_id=${SLURM_JOB_ID}
+
+ # Update the status to "pending" or "running" in the background
+ update_status $job_id /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-4_pp-2_mbz-2/status.txt &
+
+ # Run the main command
+ srun -u $LAUNCHER $CMD
+ exit_status=$?
+
+ # Update the status based on the exit status of `srun`
+ # (an illegal-memory-access CUDA error is grouped with OOM for reporting purposes)
+ if [ $exit_status -eq 0 ]; then
+     printf "completed" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-4_pp-2_mbz-2/status.txt
+ else
+     if grep -q "OutOfMemoryError" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-4_pp-2_mbz-2/log.out; then
+         printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-4_pp-2_mbz-2/status.txt
+     elif grep -q " CUDA error: an illegal memory access" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-4_pp-2_mbz-2/log.out; then
+         printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-4_pp-2_mbz-2/status.txt
+     elif grep -q "Timeout at NCCL" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-4_pp-2_mbz-2/log.out; then
+         printf "timeout" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-4_pp-2_mbz-2/status.txt
+     else
+         printf "fail" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-4_pp-2_mbz-2/status.txt
+     fi
+ fi
+
+ # Run the report script if the job completed successfully
+ if [ $exit_status -eq 0 ]; then
+     python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-4_pp-2_mbz-2 --is_logs
+     python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-4_pp-2_mbz-2 --is_profiler
+ fi
+
+ # Push the results folder to the Hub using huggingface-cli
+ huggingface-cli upload nanotron/bench_cluster /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-4_pp-2_mbz-2 llama-1B/8_GPUS/dp-1_tp-4_pp-2_mbz-2 --commit-message "Upload llama-1B/8_GPUS/dp-1_tp-4_pp-2_mbz-2"
+
+ # Verify the upload
+ if [ $? -eq 0 ]; then
+     echo "Uploading to Huggingface Hub successful"
+ else
+     echo "Failed to upload to Huggingface Hub"
+ fi
llama-1B/8_GPUS/dp-1_tp-4_pp-2_mbz-2/config.yaml ADDED
@@ -0,0 +1,90 @@
+ general:
+   project: bench_cluster
+   seed: 42
+ model:
+   ddp_bucket_cap_mb: 25
+   dtype: bfloat16
+   init_method:
+     std: 0.025
+   make_vocab_size_divisible_by: 1
+   model_config:
+     bos_token_id: 1
+     eos_token_id: 2
+     hidden_act: silu
+     hidden_size: 2048
+     initializer_range: 0.02
+     intermediate_size: 4096
+     is_llama_config: true
+     max_position_embeddings: 4096
+     num_attention_heads: 32
+     num_hidden_layers: 24
+     num_key_value_heads: 32
+     pad_token_id: null
+     pretraining_tp: 1
+     rms_norm_eps: 1.0e-05
+     rope_scaling: null
+     rope_theta: 10000.0
+     tie_word_embeddings: true
+     use_cache: true
+     vocab_size: 50257
+ optimizer:
+   accumulate_grad_in_fp32: true
+   clip_grad: 1.0
+   learning_rate_scheduler:
+     learning_rate: 0.0001
+     lr_decay_style: linear
+     lr_warmup_style: linear
+     lr_warmup_steps: 1
+     min_decay_lr: 1.0e-05
+   optimizer_factory:
+     adam_beta1: 0.9
+     adam_beta2: 0.95
+     adam_eps: 1.0e-08
+     name: adamW
+     torch_adam_is_fused: true
+   weight_decay: 0.01
+   zero_stage: 1
+ parallelism:
+   dp: 1
+   expert_parallel_size: 1
+   pp: 2
+   pp_engine: 1f1b
+   tp: 4
+   tp_linear_async_communication: false
+   tp_mode: REDUCE_SCATTER
+ profiler:
+   profiler_export_path: /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-4_pp-2_mbz-2
+ tokenizer:
+   tokenizer_max_length: null
+   tokenizer_name_or_path: openai-community/gpt2
+   tokenizer_revision: null
+ data_stages:
+ - name: Training Stage
+   start_training_step: 1
+   data:
+     dataset:
+       dataset_overwrite_cache: false
+       dataset_processing_num_proc_per_process: 64
+       hf_dataset_config_name: null
+       hf_dataset_or_datasets: roneneldan/TinyStories
+       hf_dataset_splits: train
+       text_column_name: text
+     num_loading_workers: 0
+     seed: 42
+ lighteval: null
+ tokens:
+   train_steps: 20
+   val_check_interval: -1
+   batch_accumulation_per_replica: 512
+   limit_test_batches: 0
+   limit_val_batches: 0
+   micro_batch_size: 2
+   sequence_length: 4096
+ logging:
+   iteration_step_info_interval: 1
+   log_level: info
+   log_level_replica: info
+ checkpoints:
+   checkpoint_interval: 100000
+   checkpoints_path: /dev/null
+   resume_checkpoint_path: null
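These token settings determine the global batch: micro_batch_size x batch_accumulation_per_replica x dp = 2 x 512 x 1 = 1024 sequences per step, i.e. 1024 x 4096 = 4,194,304 tokens per step, which matches the "global_batch_size: 1.02K" and "consumed_tokens: 4.19M" reported in log.out below. A small arithmetic sketch, illustrative only and not part of the uploaded files:

# Reproduce the batch arithmetic implied by this config.
micro_batch_size = 2                   # the "mbz-2" in the run name
batch_accumulation_per_replica = 512   # gradient-accumulation steps
dp = 1                                 # data-parallel replicas
sequence_length = 4096

global_batch_size = micro_batch_size * batch_accumulation_per_replica * dp
tokens_per_step = global_batch_size * sequence_length

print(global_batch_size)  # 1024    -> logged as "global_batch_size: 1.02K"
print(tokens_per_step)    # 4194304 -> logged as "consumed_tokens: 4.19M" after step 1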
llama-1B/8_GPUS/dp-1_tp-4_pp-2_mbz-2/log.out ADDED
@@ -0,0 +1,610 @@
+ ========================
+ START TIME: Wed Jul 3 21:10:37 UTC 2024
+ python3 version = Python 3.10.14
+ ========================
+ The token has not been saved to the git credentials helper. Pass `add_to_git_credential=True` in this function directly or `--add-to-git-credential` if using via `huggingface-cli` if you want to set the git credential as well.
+ Token is valid (permission: write).
+ Your token has been saved to /admin/home/ferdinand_mom/.cache/huggingface/token
+ Login successful
+ Already on 'bench_cluster'
+ M examples/config_tiny_llama.py
+ M examples/config_tiny_llama.yaml
+ M examples/train_tiny_llama.sh
+ M src/nanotron/models/llama.py
+ M src/nanotron/trainer.py
+ Your branch is up to date with 'origin/bench_cluster'.
+ Job status: RUNNING
+ W0703 21:10:40.506000 139917486880576 torch/distributed/run.py:757]
+ W0703 21:10:40.506000 139917486880576 torch/distributed/run.py:757] *****************************************
+ W0703 21:10:40.506000 139917486880576 torch/distributed/run.py:757] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
+ W0703 21:10:40.506000 139917486880576 torch/distributed/run.py:757] *****************************************
+ [default0]:07/03/2024 21:10:56 [WARNING|DP=0|PP=0|TP=0|ip-26-0-174-36]: [Vocab Size Padding] Padded vocab (size: 50257) with 3 dummy tokens (new size: 50260)
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Config:
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Config(general=GeneralArgs(project='bench_cluster',
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: run='%date_%jobid',
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: seed=42,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: step=None,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: consumed_train_samples=None,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: benchmark_csv_path=None,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: ignore_sanity_checks=True),
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: parallelism=ParallelismArgs(dp=1,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: pp=2,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: tp=4,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: pp_engine=<nanotron.parallel.pipeline_parallel.engine.OneForwardOneBackwardPipelineEngine object at 0x7f08da39c880>,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: tp_mode=<TensorParallelLinearMode.REDUCE_SCATTER: 2>,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: tp_linear_async_communication=False,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: expert_parallel_size=1),
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: model=ModelArgs(model_config=LlamaConfig(bos_token_id=1,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: eos_token_id=2,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: hidden_act='silu',
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: hidden_size=2048,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: initializer_range=0.02,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: intermediate_size=4096,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: is_llama_config=True,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: max_position_embeddings=4096,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: num_attention_heads=32,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: num_hidden_layers=24,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: num_key_value_heads=32,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: pad_token_id=None,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: pretraining_tp=1,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: rms_norm_eps=1e-05,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: rope_scaling=None,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: rope_theta=10000.0,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: tie_word_embeddings=True,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: use_cache=True,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: vocab_size=50260),
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: init_method=RandomInit(std=0.025),
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: dtype=torch.bfloat16,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: make_vocab_size_divisible_by=1,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: ddp_bucket_cap_mb=25),
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: tokenizer=TokenizerArgs(tokenizer_name_or_path='openai-community/gpt2',
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: tokenizer_revision=None,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: tokenizer_max_length=None),
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: checkpoints=CheckpointsArgs(checkpoints_path=Path('/dev/null'),
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: checkpoint_interval=100000,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: save_initial_state=False,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: resume_checkpoint_path=None,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: checkpoints_path_is_shared_file_system=False),
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: logging=LoggingArgs(log_level='info',
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: log_level_replica='info',
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: iteration_step_info_interval=1),
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: tokens=TokensArgs(sequence_length=4096,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: train_steps=20,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: micro_batch_size=2,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: batch_accumulation_per_replica=512,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: val_check_interval=-1,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: limit_val_batches=0,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: limit_test_batches=0),
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: optimizer=OptimizerArgs(optimizer_factory=AdamWOptimizerArgs(adam_eps=1e-08,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: adam_beta1=0.9,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: adam_beta2=0.95,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: torch_adam_is_fused=True,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: name='adamW'),
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: zero_stage=1,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: weight_decay=0.01,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: clip_grad=1.0,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: accumulate_grad_in_fp32=True,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: learning_rate_scheduler=LRSchedulerArgs(learning_rate=0.0001,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: lr_warmup_steps=1,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: lr_warmup_style='linear',
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: lr_decay_style='linear',
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: lr_decay_steps=19,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: lr_decay_starting_step=None,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: min_decay_lr=1e-05)),
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: data_stages=[DatasetStageArgs(name='Training Stage',
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: start_training_step=1,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: data=DataArgs(dataset=PretrainDatasetsArgs(hf_dataset_or_datasets='roneneldan/TinyStories',
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: hf_dataset_splits='train',
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: hf_dataset_config_name=None,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: dataset_processing_num_proc_per_process=64,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: dataset_overwrite_cache=False,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: text_column_name='text'),
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: seed=42,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: num_loading_workers=0))],
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: profiler=ProfilerArgs(profiler_export_path=Path('/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-4_pp-2_mbz-2')),
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: lighteval=None)
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Model Config:
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: LlamaConfig(bos_token_id=1,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: eos_token_id=2,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: hidden_act='silu',
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: hidden_size=2048,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: initializer_range=0.02,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: intermediate_size=4096,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: is_llama_config=True,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: max_position_embeddings=4096,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: num_attention_heads=32,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: num_hidden_layers=24,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: num_key_value_heads=32,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: pad_token_id=None,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: pretraining_tp=1,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: rms_norm_eps=1e-05,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: rope_scaling=None,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: rope_theta=10000.0,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: tie_word_embeddings=True,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: use_cache=True,
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: vocab_size=50260)
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Building model..
+ [default0]:07/03/2024 21:10:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Setting PP block ranks...
+ [default4]:07/03/2024 21:11:10 [INFO|DP=0|PP=1|TP=0|ip-26-0-174-36]: Local number of parameters: 131M (249.16MiB)
+ [default7]:07/03/2024 21:11:10 [INFO|DP=0|PP=1|TP=3|ip-26-0-174-36]: Local number of parameters: 131M (249.16MiB)
+ [default7]:07/03/2024 21:11:10 [INFO|DP=0|PP=1|TP=3|ip-26-0-174-36]: [After model building] Memory usage: 260.10MiB. Peak allocated: 262.13MiB Peak reserved: 264.00MiB
+ [default7]:07/03/2024 21:11:10 [INFO|DP=0|PP=1|TP=3|ip-26-0-174-36]: No checkpoint path provided.
+ [default4]:07/03/2024 21:11:10 [INFO|DP=0|PP=1|TP=0|ip-26-0-174-36]: [After model building] Memory usage: 260.10MiB. Peak allocated: 262.13MiB Peak reserved: 264.00MiB
+ [default4]:07/03/2024 21:11:10 [INFO|DP=0|PP=1|TP=0|ip-26-0-174-36]: No checkpoint path provided.
+ [default0]:07/03/2024 21:11:10 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Total number of parameters: 1.21G (2313.42MiB)
+ [default0]:07/03/2024 21:11:10 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Local number of parameters: 173M (329.19MiB)
+ [default0]:07/03/2024 21:11:10 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: [After model building] Memory usage: 344.13MiB. Peak allocated: 346.16MiB Peak reserved: 348.00MiB
+ [default0]:07/03/2024 21:11:10 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: No checkpoint path provided.
+ [default0]:07/03/2024 21:11:10 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Parametrizing model parameters using StandardParametrizator
+ [default1]:07/03/2024 21:11:10 [INFO|DP=0|PP=0|TP=1|ip-26-0-174-36]: Local number of parameters: 173M (329.19MiB)
+ [default1]:07/03/2024 21:11:10 [INFO|DP=0|PP=0|TP=1|ip-26-0-174-36]: [After model building] Memory usage: 344.13MiB. Peak allocated: 346.16MiB Peak reserved: 348.00MiB
+ [default1]:07/03/2024 21:11:10 [INFO|DP=0|PP=0|TP=1|ip-26-0-174-36]: No checkpoint path provided.
+ [default5]:07/03/2024 21:11:10 [INFO|DP=0|PP=1|TP=1|ip-26-0-174-36]: Local number of parameters: 131M (249.16MiB)
+ [default5]:07/03/2024 21:11:10 [INFO|DP=0|PP=1|TP=1|ip-26-0-174-36]: [After model building] Memory usage: 260.10MiB. Peak allocated: 262.13MiB Peak reserved: 264.00MiB
+ [default3]:07/03/2024 21:11:10 [INFO|DP=0|PP=0|TP=3|ip-26-0-174-36]: Local number of parameters: 173M (329.19MiB)
+ [default3]:07/03/2024 21:11:10 [INFO|DP=0|PP=0|TP=3|ip-26-0-174-36]: [After model building] Memory usage: 344.13MiB. Peak allocated: 346.16MiB Peak reserved: 348.00MiB
+ [default5]:07/03/2024 21:11:10 [INFO|DP=0|PP=1|TP=1|ip-26-0-174-36]: No checkpoint path provided.
+ [default3]:07/03/2024 21:11:10 [INFO|DP=0|PP=0|TP=3|ip-26-0-174-36]: No checkpoint path provided.
+ [default6]:07/03/2024 21:11:10 [INFO|DP=0|PP=1|TP=2|ip-26-0-174-36]: Local number of parameters: 131M (249.16MiB)
+ [default6]:07/03/2024 21:11:10 [INFO|DP=0|PP=1|TP=2|ip-26-0-174-36]: [After model building] Memory usage: 260.10MiB. Peak allocated: 262.13MiB Peak reserved: 264.00MiB
+ [default6]:07/03/2024 21:11:10 [INFO|DP=0|PP=1|TP=2|ip-26-0-174-36]: No checkpoint path provided.
+ [default2]:07/03/2024 21:11:10 [INFO|DP=0|PP=0|TP=2|ip-26-0-174-36]: Local number of parameters: 173M (329.19MiB)
+ [default2]:07/03/2024 21:11:10 [INFO|DP=0|PP=0|TP=2|ip-26-0-174-36]: [After model building] Memory usage: 344.13MiB. Peak allocated: 346.16MiB Peak reserved: 348.00MiB
+ [default2]:07/03/2024 21:11:10 [INFO|DP=0|PP=0|TP=2|ip-26-0-174-36]: No checkpoint path provided.
+ [default0]:07/03/2024 21:11:11 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: [Optimizer Building] Using LearningRateForSP as learning rate
+ [default0]:07/03/2024 21:11:11 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: [ZeRO sharding] Size of optimizer params per rank:
+ [default0]:07/03/2024 21:11:11 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: [ZeRO sharding] DP Rank 0 has 173M out of 173M (100.00%) params' optimizer states
+ [default0]:07/03/2024 21:11:12 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: [Training Plan] Stage Training Stage has 19 remaining training steps and has consumed 0 samples
+ [default0]:07/03/2024 21:11:12 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Using `datasets` library
+ [default0]:07/03/2024 21:11:12 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Loading tokenizer from openai-community/gpt2 and transformers/hf_hub versions ('4.41.2', '0.23.4')
+ [default0]:07/03/2024 21:11:12 [WARNING|DP=0|PP=0|TP=0|ip-26-0-174-36]: Repo card metadata block was not found. Setting CardData to empty.
+ [default0]:Repo card metadata block was not found. Setting CardData to empty.
+ [default0]:07/03/2024 21:11:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: [Training Plan] There are 1 training stages
+ [default0]:07/03/2024 21:11:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: [Stage Training Stage] start from step 1
+ [default0]:07/03/2024 21:11:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]:
+ [default0]:07/03/2024 21:11:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: [Start training] datetime: 2024-07-03 21:11:13.517381 | mbs: 2 | grad_accum: 512 | global_batch_size: 1024 | sequence_length: 4096 | train_steps: 20 | start_iteration_step: 0 | consumed_train_samples: 0
+ [default0]:07/03/2024 21:11:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Resuming training from stage Training Stage, it has trained for 0 samples and has 19 remaining train steps
+ [default0]:07/03/2024 21:11:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Memory usage: 1660.89MiB. Peak allocated 1660.89MiB. Peak reserved: 1668.00MiB
+ [default4]:07/03/2024 21:11:13 [WARNING|DP=0|PP=1|TP=0|ip-26-0-174-36]: Repo card metadata block was not found. Setting CardData to empty.
+ [default4]:Repo card metadata block was not found. Setting CardData to empty.
+ [default5]:07/03/2024 21:11:13 [WARNING|DP=0|PP=1|TP=1|ip-26-0-174-36]: Repo card metadata block was not found. Setting CardData to empty.
+ [default5]:Repo card metadata block was not found. Setting CardData to empty.
+ [default7]:07/03/2024 21:11:13 [WARNING|DP=0|PP=1|TP=3|ip-26-0-174-36]: Repo card metadata block was not found. Setting CardData to empty.
+ [default6]:Repo card metadata block was not found. Setting CardData to empty.
+ [default1]:07/03/2024 21:11:13 [WARNING|DP=0|PP=0|TP=1|ip-26-0-174-36]: Repo card metadata block was not found. Setting CardData to empty.
+ [default3]:07/03/2024 21:11:13 [WARNING|DP=0|PP=0|TP=3|ip-26-0-174-36]: Repo card metadata block was not found. Setting CardData to empty.
+ [default3]:Repo card metadata block was not found. Setting CardData to empty.
+ [default6]:07/03/2024 21:11:13 [WARNING|DP=0|PP=1|TP=2|ip-26-0-174-36]: Repo card metadata block was not found. Setting CardData to empty.
+ [default1]:Repo card metadata block was not found. Setting CardData to empty.
+ [default2]:07/03/2024 21:11:13 [WARNING|DP=0|PP=0|TP=2|ip-26-0-174-36]: Repo card metadata block was not found. Setting CardData to empty.
+ [default7]:Repo card metadata block was not found. Setting CardData to empty.
+ [default2]:Repo card metadata block was not found. Setting CardData to empty.
+ [default4]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/graph.py:744: UserWarning: c10d::allreduce_: an autograd kernel was not registered to the Autograd key(s) but we are trying to backprop through it. This may lead to silently incorrect behavior. This behavior is deprecated and will be removed in a future version of PyTorch. If your operator is differentiable, please ensure you have registered an autograd kernel to the correct Autograd key (e.g. DispatchKey::Autograd, DispatchKey::CompositeImplicitAutograd). If your operator is not differentiable, or to squash this warning and use the previous behavior, please register torch::CppFunction::makeFallthrough() to DispatchKey::Autograd. (Triggered internally at ../torch/csrc/autograd/autograd_not_implemented_fallback.cpp:63.)
+ [default4]: return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
+ [default6]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/graph.py:744: UserWarning: c10d::allreduce_: an autograd kernel was not registered to the Autograd key(s) but we are trying to backprop through it. This may lead to silently incorrect behavior. This behavior is deprecated and will be removed in a future version of PyTorch. If your operator is differentiable, please ensure you have registered an autograd kernel to the correct Autograd key (e.g. DispatchKey::Autograd, DispatchKey::CompositeImplicitAutograd). If your operator is not differentiable, or to squash this warning and use the previous behavior, please register torch::CppFunction::makeFallthrough() to DispatchKey::Autograd. (Triggered internally at ../torch/csrc/autograd/autograd_not_implemented_fallback.cpp:63.)
+ [default6]: return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
+ [default5]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/graph.py:744: UserWarning: c10d::allreduce_: an autograd kernel was not registered to the Autograd key(s) but we are trying to backprop through it. This may lead to silently incorrect behavior. This behavior is deprecated and will be removed in a future version of PyTorch. If your operator is differentiable, please ensure you have registered an autograd kernel to the correct Autograd key (e.g. DispatchKey::Autograd, DispatchKey::CompositeImplicitAutograd). If your operator is not differentiable, or to squash this warning and use the previous behavior, please register torch::CppFunction::makeFallthrough() to DispatchKey::Autograd. (Triggered internally at ../torch/csrc/autograd/autograd_not_implemented_fallback.cpp:63.)
+ [default5]: return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
+ [default7]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/graph.py:744: UserWarning: c10d::allreduce_: an autograd kernel was not registered to the Autograd key(s) but we are trying to backprop through it. This may lead to silently incorrect behavior. This behavior is deprecated and will be removed in a future version of PyTorch. If your operator is differentiable, please ensure you have registered an autograd kernel to the correct Autograd key (e.g. DispatchKey::Autograd, DispatchKey::CompositeImplicitAutograd). If your operator is not differentiable, or to squash this warning and use the previous behavior, please register torch::CppFunction::makeFallthrough() to DispatchKey::Autograd. (Triggered internally at ../torch/csrc/autograd/autograd_not_implemented_fallback.cpp:63.)
+ [default7]: return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
+ [default3]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/graph.py:744: UserWarning: c10d::allreduce_: an autograd kernel was not registered to the Autograd key(s) but we are trying to backprop through it. This may lead to silently incorrect behavior. This behavior is deprecated and will be removed in a future version of PyTorch. If your operator is differentiable, please ensure you have registered an autograd kernel to the correct Autograd key (e.g. DispatchKey::Autograd, DispatchKey::CompositeImplicitAutograd). If your operator is not differentiable, or to squash this warning and use the previous behavior, please register torch::CppFunction::makeFallthrough() to DispatchKey::Autograd. (Triggered internally at ../torch/csrc/autograd/autograd_not_implemented_fallback.cpp:63.)
+ [default3]: return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
+ [default1]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/graph.py:744: UserWarning: c10d::allreduce_: an autograd kernel was not registered to the Autograd key(s) but we are trying to backprop through it. This may lead to silently incorrect behavior. This behavior is deprecated and will be removed in a future version of PyTorch. If your operator is differentiable, please ensure you have registered an autograd kernel to the correct Autograd key (e.g. DispatchKey::Autograd, DispatchKey::CompositeImplicitAutograd). If your operator is not differentiable, or to squash this warning and use the previous behavior, please register torch::CppFunction::makeFallthrough() to DispatchKey::Autograd. (Triggered internally at ../torch/csrc/autograd/autograd_not_implemented_fallback.cpp:63.)
+ [default1]: return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
+ [default0]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/graph.py:744: UserWarning: c10d::allreduce_: an autograd kernel was not registered to the Autograd key(s) but we are trying to backprop through it. This may lead to silently incorrect behavior. This behavior is deprecated and will be removed in a future version of PyTorch. If your operator is differentiable, please ensure you have registered an autograd kernel to the correct Autograd key (e.g. DispatchKey::Autograd, DispatchKey::CompositeImplicitAutograd). If your operator is not differentiable, or to squash this warning and use the previous behavior, please register torch::CppFunction::makeFallthrough() to DispatchKey::Autograd. (Triggered internally at ../torch/csrc/autograd/autograd_not_implemented_fallback.cpp:63.)
+ [default0]: return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
+ [default2]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/graph.py:744: UserWarning: c10d::allreduce_: an autograd kernel was not registered to the Autograd key(s) but we are trying to backprop through it. This may lead to silently incorrect behavior. This behavior is deprecated and will be removed in a future version of PyTorch. If your operator is differentiable, please ensure you have registered an autograd kernel to the correct Autograd key (e.g. DispatchKey::Autograd, DispatchKey::CompositeImplicitAutograd). If your operator is not differentiable, or to squash this warning and use the previous behavior, please register torch::CppFunction::makeFallthrough() to DispatchKey::Autograd. (Triggered internally at ../torch/csrc/autograd/autograd_not_implemented_fallback.cpp:63.)
+ [default2]: return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
+ [default0]:07/03/2024 21:12:11 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Memory usage: 1731.96MiB. Peak allocated 7525.35MiB. Peak reserved: 7764.00MiB
+ [default0]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py:2261: UserWarning: torch.distributed.all_reduce_coalesced will be deprecated. If you must use it, please revisit our documentation later at https://pytorch.org/docs/master/distributed.html#collective-functions
+ [default0]: warnings.warn(
+ [default5]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py:2261: UserWarning: torch.distributed.all_reduce_coalesced will be deprecated. If you must use it, please revisit our documentation later at https://pytorch.org/docs/master/distributed.html#collective-functions
+ [default5]: warnings.warn(
+ [default1]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py:2261: UserWarning: torch.distributed.all_reduce_coalesced will be deprecated. If you must use it, please revisit our documentation later at https://pytorch.org/docs/master/distributed.html#collective-functions
+ [default1]: warnings.warn(
+ [default4]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py:2261: UserWarning: torch.distributed.all_reduce_coalesced will be deprecated. If you must use it, please revisit our documentation later at https://pytorch.org/docs/master/distributed.html#collective-functions
+ [default4]: warnings.warn(
+ [default3]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py:2261: UserWarning: torch.distributed.all_reduce_coalesced will be deprecated. If you must use it, please revisit our documentation later at https://pytorch.org/docs/master/distributed.html#collective-functions
+ [default3]: warnings.warn(
+ [default6]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py:2261: UserWarning: torch.distributed.all_reduce_coalesced will be deprecated. If you must use it, please revisit our documentation later at https://pytorch.org/docs/master/distributed.html#collective-functions
+ [default6]: warnings.warn(
+ [default2]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py:2261: UserWarning: torch.distributed.all_reduce_coalesced will be deprecated. If you must use it, please revisit our documentation later at https://pytorch.org/docs/master/distributed.html#collective-functions
+ [default2]: warnings.warn(
+ [default7]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py:2261: UserWarning: torch.distributed.all_reduce_coalesced will be deprecated. If you must use it, please revisit our documentation later at https://pytorch.org/docs/master/distributed.html#collective-functions
+ [default7]: warnings.warn(
+ [default0]:07/03/2024 21:12:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Memory usage: 3048.77MiB. Peak allocated 3048.77MiB. Peak reserved: 7764.00MiB
+ [default4]:07/03/2024 21:12:18 [INFO|DP=0|PP=1|TP=0|ip-26-0-174-36]: iteration: 1 / 20 | consumed_tokens: 4.19M | elapsed_time_per_iteration_ms: 64.6K | tokens_per_sec: 64.9K | tokens_per_sec_per_gpu: 8.11K | global_batch_size: 1.02K | lm_loss: 11.1 | lr: 0.0001 | model_tflops_per_gpu: 73.6 | hardware_tflops_per_gpu: 73.6 | grad_norm: 15 | cuda_memory_allocated: 2.44G | cuda_max_memory_reserved: 5.02G | hd_total_memory_tb: 312G | hd_used_memory_tb: 65.8G | hd_free_memory_tb: 246G
+ [default0]:07/03/2024 21:13:00 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Memory usage: 3048.77MiB. Peak allocated 8716.40MiB. Peak reserved: 8980.00MiB
+ [default4]:07/03/2024 21:13:00 [INFO|DP=0|PP=1|TP=0|ip-26-0-174-36]: iteration: 2 / 20 | consumed_tokens: 8.39M | elapsed_time_per_iteration_ms: 41.7K | tokens_per_sec: 101K | tokens_per_sec_per_gpu: 12.6K | global_batch_size: 1.02K | lm_loss: 11.1 | lr: 9.53e-05 | model_tflops_per_gpu: 114 | hardware_tflops_per_gpu: 114 | grad_norm: 15.1 | cuda_memory_allocated: 2.44G | cuda_max_memory_reserved: 5.44G | hd_total_memory_tb: 312G | hd_used_memory_tb: 65.8G | hd_free_memory_tb: 246G
+ [default0]:07/03/2024 21:13:00 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Memory usage: 3048.77MiB. Peak allocated 3048.81MiB. Peak reserved: 8980.00MiB
+ [default0]:07/03/2024 21:13:48 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Memory usage: 3048.77MiB. Peak allocated 8716.40MiB. Peak reserved: 8980.00MiB
+ [default0]:STAGE:2024-07-03 21:13:48 219616:219616 ActivityProfilerController.cpp:314] Completed Stage: Warm Up
+ [default4]:07/03/2024 21:13:48 [INFO|DP=0|PP=1|TP=0|ip-26-0-174-36]: iteration: 3 / 20 | consumed_tokens: 12.6M | elapsed_time_per_iteration_ms: 47.9K | tokens_per_sec: 87.6K | tokens_per_sec_per_gpu: 10.9K | global_batch_size: 1.02K | lm_loss: 11.4 | lr: 9.05e-05 | model_tflops_per_gpu: 99.3 | hardware_tflops_per_gpu: 99.3 | grad_norm: 106 | cuda_memory_allocated: 2.44G | cuda_max_memory_reserved: 5.44G | hd_total_memory_tb: 312G | hd_used_memory_tb: 65.8G | hd_free_memory_tb: 246G
+ [default0]:07/03/2024 21:13:48 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Memory usage: 3048.77MiB. Peak allocated 3048.81MiB. Peak reserved: 8980.00MiB
+ [default0]:07/03/2024 21:14:53 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Memory usage: 3048.77MiB. Peak allocated 8716.40MiB. Peak reserved: 8980.00MiB
+ [default4]:07/03/2024 21:14:53 [INFO|DP=0|PP=1|TP=0|ip-26-0-174-36]: iteration: 4 / 20 | consumed_tokens: 16.8M | elapsed_time_per_iteration_ms: 64.5K | tokens_per_sec: 65K | tokens_per_sec_per_gpu: 8.13K | global_batch_size: 1.02K | lm_loss: 11.7 | lr: 8.58e-05 | model_tflops_per_gpu: 73.8 | hardware_tflops_per_gpu: 73.8 | grad_norm: 24.6 | cuda_memory_allocated: 2.44G | cuda_max_memory_reserved: 5.44G | hd_total_memory_tb: 312G | hd_used_memory_tb: 65.8G | hd_free_memory_tb: 246G
+ [default0]:07/03/2024 21:14:53 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Memory usage: 3048.77MiB. Peak allocated 3048.81MiB. Peak reserved: 8980.00MiB
+ [default0]:07/03/2024 21:15:57 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Memory usage: 3048.77MiB. Peak allocated 8716.40MiB. Peak reserved: 8980.00MiB
+ [default4]:07/03/2024 21:15:57 [INFO|DP=0|PP=1|TP=0|ip-26-0-174-36]: iteration: 5 / 20 | consumed_tokens: 21M | elapsed_time_per_iteration_ms: 64.1K | tokens_per_sec: 65.4K | tokens_per_sec_per_gpu: 8.18K | global_batch_size: 1.02K | lm_loss: 10 | lr: 8.11e-05 | model_tflops_per_gpu: 74.2 | hardware_tflops_per_gpu: 74.2 | grad_norm: 11
+ [default4]:07/03/2024 21:17:01 [INFO|DP=0|PP=1|TP=0|ip-26-0-174-36]: iteration: 6 / 20 | consumed_tokens: 25.2M | elapsed_time_per_iteration_ms: 64.6K | tokens_per_sec: 64.9K | tokens_per_sec_per_gpu: 8.11K | global_batch_size: 1.02K | lm_loss: 9.46 | lr: 7.63e-05 | model_tflops_per_gpu: 73.6 | hardware_tflops_per_gpu: 73.6 | grad_norm: 7.2
+ [default0]:STAGE:2024-07-03 21:19:57 219616:219616 ActivityProfilerController.cpp:320] Completed Stage: Collection
+ [default0]:STAGE:2024-07-03 21:20:15 219616:219616 ActivityProfilerController.cpp:324] Completed Stage: Post Processing
+ [default2]:[rank2]:[E ProcessGroupNCCL.cpp:563] [Rank 2] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=350245, OpType=_REDUCE_SCATTER_BASE, NumelIn=16777216, NumelOut=4194304, Timeout(ms)=600000) ran for 600014 milliseconds before timing out.
+ [default4]:[rank4]:[E ProcessGroupNCCL.cpp:563] [Rank 1] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=27658, OpType=RECV, NumelIn=7, NumelOut=7, Timeout(ms)=600000) ran for 600036 milliseconds before timing out.
+ [default5]:[rank5]:[E ProcessGroupNCCL.cpp:563] [Rank 1] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=27658, OpType=RECV, NumelIn=7, NumelOut=7, Timeout(ms)=600000) ran for 600003 milliseconds before timing out.
+ [default3]:[rank3]:[E ProcessGroupNCCL.cpp:563] [Rank 3] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=350245, OpType=_REDUCE_SCATTER_BASE, NumelIn=16777216, NumelOut=4194304, Timeout(ms)=600000) ran for 600013 milliseconds before timing out.
+ [default7]:[rank7]:[E ProcessGroupNCCL.cpp:563] [Rank 1] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=27658, OpType=RECV, NumelIn=7, NumelOut=7, Timeout(ms)=600000) ran for 600027 milliseconds before timing out.
+ [default6]:[rank6]:[E ProcessGroupNCCL.cpp:563] [Rank 1] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=27658, OpType=RECV, NumelIn=7, NumelOut=7, Timeout(ms)=600000) ran for 600071 milliseconds before timing out.
+ [default1]:[rank1]:[E ProcessGroupNCCL.cpp:563] [Rank 1] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=350245, OpType=_REDUCE_SCATTER_BASE, NumelIn=16777216, NumelOut=4194304, Timeout(ms)=600000) ran for 600088 milliseconds before timing out.
239
+ [default7]:[rank7]: Traceback (most recent call last):
240
+ [default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
241
+ [default7]:[rank7]: trainer.train(dataloader)
242
+ [default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
243
+ [default7]:[rank7]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
244
+ [default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
245
+ [default7]:[rank7]: outputs = self.pipeline_engine.train_batch_iter(
246
+ [default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
247
+ [default7]:[rank7]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
248
+ [default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
249
+ [default7]:[rank7]: output = model(**micro_batch)
250
+ [default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
251
+ [default7]:[rank7]: return self._call_impl(*args, **kwargs)
252
+ [default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
253
+ [default7]:[rank7]: return forward_call(*args, **kwargs)
254
+ [default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
255
+ [default7]:[rank7]: sharded_logits = self.model(
256
+ [default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
257
+ [default7]:[rank7]: return self._call_impl(*args, **kwargs)
258
+ [default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
259
+ [default7]:[rank7]: return forward_call(*args, **kwargs)
260
+ [default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
261
+ [default7]:[rank7]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
262
+ [default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
263
+ [default7]:[rank7]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
264
+ [default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
265
+ [default7]:[rank7]: return self._call_impl(*args, **kwargs)
266
+ [default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
267
+ [default7]:[rank7]: return forward_call(*args, **kwargs)
268
+ [default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 126, in forward
269
+ [default7]:[rank7]: new_kwargs[name] = recv_from_pipeline_state_buffer(
270
+ [default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/functional.py", line 117, in recv_from_pipeline_state_buffer
271
+ [default7]:[rank7]: pipeline_state.run_communication()
272
+ [default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/state.py", line 150, in run_communication
273
+ [default7]:[rank7]: recv_activation_tensor = recv_activation()
274
+ [default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/state.py", line 31, in __call__
275
+ [default7]:[rank7]: return self.p2p.recv_tensors(num_tensors=1, from_rank=self.from_rank)[0]
276
+ [default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/p2p.py", line 353, in recv_tensors
277
+ [default7]:[rank7]: buffers, futures = self.irecv_tensors(num_tensors=num_tensors, from_rank=from_rank, tag=tag)
278
+ [default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/p2p.py", line 326, in irecv_tensors
279
+ [default7]:[rank7]: meta = self._recv_meta(from_rank=from_rank, tag=tag)
280
+ [default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/p2p.py", line 269, in _recv_meta
281
+ [default7]:[rank7]: dist.recv(
282
+ [default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/c10d_logger.py", line 75, in wrapper
283
+ [default7]:[rank7]: return func(*args, **kwargs)
284
+ [default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py", line 1932, in recv
285
+ [default7]:[rank7]: pg.recv([tensor], group_src_rank, tag).wait()
286
+ [default7]:[rank7]: torch.distributed.DistBackendError: NCCL communicator was aborted on rank 1.
287
+ [default4]:[rank4]: Traceback (most recent call last):
288
+ [default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
289
+ [default4]:[rank4]: trainer.train(dataloader)
290
+ [default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
291
+ [default4]:[rank4]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
292
+ [default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
293
+ [default4]:[rank4]: outputs = self.pipeline_engine.train_batch_iter(
294
+ [default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
295
+ [default4]:[rank4]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
296
+ [default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
297
+ [default4]:[rank4]: output = model(**micro_batch)
298
+ [default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
299
+ [default4]:[rank4]: return self._call_impl(*args, **kwargs)
300
+ [default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
301
+ [default4]:[rank4]: return forward_call(*args, **kwargs)
302
+ [default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
303
+ [default4]:[rank4]: sharded_logits = self.model(
304
+ [default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
305
+ [default4]:[rank4]: return self._call_impl(*args, **kwargs)
306
+ [default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
307
+ [default4]:[rank4]: return forward_call(*args, **kwargs)
308
+ [default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
309
+ [default4]:[rank4]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
310
+ [default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
311
+ [default4]:[rank4]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
312
+ [default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
313
+ [default4]:[rank4]: return self._call_impl(*args, **kwargs)
314
+ [default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
315
+ [default4]:[rank4]: return forward_call(*args, **kwargs)
316
+ [default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 126, in forward
317
+ [default4]:[rank4]: new_kwargs[name] = recv_from_pipeline_state_buffer(
318
+ [default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/functional.py", line 117, in recv_from_pipeline_state_buffer
319
+ [default4]:[rank4]: pipeline_state.run_communication()
320
+ [default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/state.py", line 150, in run_communication
321
+ [default4]:[rank4]: recv_activation_tensor = recv_activation()
322
+ [default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/state.py", line 31, in __call__
323
+ [default4]:[rank4]: return self.p2p.recv_tensors(num_tensors=1, from_rank=self.from_rank)[0]
324
+ [default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/p2p.py", line 353, in recv_tensors
325
+ [default4]:[rank4]: buffers, futures = self.irecv_tensors(num_tensors=num_tensors, from_rank=from_rank, tag=tag)
326
+ [default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/p2p.py", line 326, in irecv_tensors
327
+ [default4]:[rank4]: meta = self._recv_meta(from_rank=from_rank, tag=tag)
328
+ [default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/p2p.py", line 269, in _recv_meta
329
+ [default4]:[rank4]: dist.recv(
330
+ [default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/c10d_logger.py", line 75, in wrapper
331
+ [default4]:[rank4]: return func(*args, **kwargs)
332
+ [default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py", line 1932, in recv
333
+ [default4]:[rank4]: pg.recv([tensor], group_src_rank, tag).wait()
334
+ [default4]:[rank4]: torch.distributed.DistBackendError: NCCL communicator was aborted on rank 1.
335
+ [default6]:[rank6]: Traceback (most recent call last):
336
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
+ [default6]:[rank6]: trainer.train(dataloader)
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
+ [default6]:[rank6]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
+ [default6]:[rank6]: outputs = self.pipeline_engine.train_batch_iter(
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
+ [default6]:[rank6]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
+ [default6]:[rank6]: output = model(**micro_batch)
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default6]:[rank6]: return self._call_impl(*args, **kwargs)
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default6]:[rank6]: return forward_call(*args, **kwargs)
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
+ [default6]:[rank6]: sharded_logits = self.model(
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default6]:[rank6]: return self._call_impl(*args, **kwargs)
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default6]:[rank6]: return forward_call(*args, **kwargs)
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
+ [default6]:[rank6]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
+ [default6]:[rank6]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default6]:[rank6]: return self._call_impl(*args, **kwargs)
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default6]:[rank6]: return forward_call(*args, **kwargs)
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 126, in forward
+ [default6]:[rank6]: new_kwargs[name] = recv_from_pipeline_state_buffer(
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/functional.py", line 117, in recv_from_pipeline_state_buffer
+ [default6]:[rank6]: pipeline_state.run_communication()
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/state.py", line 150, in run_communication
+ [default6]:[rank6]: recv_activation_tensor = recv_activation()
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/state.py", line 31, in __call__
+ [default6]:[rank6]: return self.p2p.recv_tensors(num_tensors=1, from_rank=self.from_rank)[0]
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/p2p.py", line 353, in recv_tensors
+ [default6]:[rank6]: buffers, futures = self.irecv_tensors(num_tensors=num_tensors, from_rank=from_rank, tag=tag)
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/p2p.py", line 326, in irecv_tensors
+ [default6]:[rank6]: meta = self._recv_meta(from_rank=from_rank, tag=tag)
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/p2p.py", line 269, in _recv_meta
+ [default6]:[rank6]: dist.recv(
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/c10d_logger.py", line 75, in wrapper
+ [default6]:[rank6]: return func(*args, **kwargs)
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py", line 1932, in recv
+ [default6]:[rank6]: pg.recv([tensor], group_src_rank, tag).wait()
+ [default6]:[rank6]: torch.distributed.DistBackendError: NCCL communicator was aborted on rank 1.
+ [default5]:[rank5]: Traceback (most recent call last):
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
+ [default5]:[rank5]: trainer.train(dataloader)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
+ [default5]:[rank5]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
+ [default5]:[rank5]: outputs = self.pipeline_engine.train_batch_iter(
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
+ [default5]:[rank5]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
+ [default5]:[rank5]: output = model(**micro_batch)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default5]:[rank5]: return self._call_impl(*args, **kwargs)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default5]:[rank5]: return forward_call(*args, **kwargs)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
+ [default5]:[rank5]: sharded_logits = self.model(
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default5]:[rank5]: return self._call_impl(*args, **kwargs)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default5]:[rank5]: return forward_call(*args, **kwargs)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
+ [default5]:[rank5]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
+ [default5]:[rank5]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default5]:[rank5]: return self._call_impl(*args, **kwargs)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default5]:[rank5]: return forward_call(*args, **kwargs)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 126, in forward
+ [default5]:[rank5]: new_kwargs[name] = recv_from_pipeline_state_buffer(
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/functional.py", line 117, in recv_from_pipeline_state_buffer
+ [default5]:[rank5]: pipeline_state.run_communication()
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/state.py", line 150, in run_communication
+ [default5]:[rank5]: recv_activation_tensor = recv_activation()
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/state.py", line 31, in __call__
+ [default5]:[rank5]: return self.p2p.recv_tensors(num_tensors=1, from_rank=self.from_rank)[0]
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/p2p.py", line 353, in recv_tensors
+ [default5]:[rank5]: buffers, futures = self.irecv_tensors(num_tensors=num_tensors, from_rank=from_rank, tag=tag)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/p2p.py", line 326, in irecv_tensors
+ [default5]:[rank5]: meta = self._recv_meta(from_rank=from_rank, tag=tag)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/p2p.py", line 269, in _recv_meta
+ [default5]:[rank5]: dist.recv(
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/c10d_logger.py", line 75, in wrapper
+ [default5]:[rank5]: return func(*args, **kwargs)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py", line 1932, in recv
+ [default5]:[rank5]: pg.recv([tensor], group_src_rank, tag).wait()
+ [default5]:[rank5]: torch.distributed.DistBackendError: NCCL communicator was aborted on rank 1.
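The tracebacks above all stall at the same point: nanotron's pipeline-parallel receive path (block.py -> recv_from_pipeline_state_buffer -> P2P._recv_meta -> dist.recv). The receive is two-phase: a small metadata tensor describing the incoming activation arrives first (the RECV of 7 elements reported by the watchdog dumps below), then the payload itself. A minimal sketch of that pattern follows; the function name, metadata layout, and payload dtype are illustrative assumptions, not nanotron's actual implementation.

    import torch
    import torch.distributed as dist

    def recv_activation(from_rank: int, tag: int = 0) -> torch.Tensor:
        # Phase 1: block on a small metadata tensor (e.g. ndim + shape).
        # If the peer never posts the matching send, this recv hangs until
        # the NCCL watchdog timeout (600000 ms in this run) aborts the
        # communicator -- exactly the failure mode in the tracebacks above.
        meta = torch.empty(7, dtype=torch.long, device="cuda")
        dist.recv(meta, src=from_rank, tag=tag)
        ndim = int(meta[0])
        shape = tuple(int(d) for d in meta[1 : 1 + ndim])
        # Phase 2: allocate a buffer of the advertised shape and receive
        # the actual activation (dtype assumed here).
        buffer = torch.empty(shape, dtype=torch.bfloat16, device="cuda")
        dist.recv(buffer, src=from_rank, tag=tag)
        return buffer

Every rank dies in _recv_meta because the sending stage stopped making progress, so the metadata receive never completes.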
+ [default7]:[rank7]:[E ProcessGroupNCCL.cpp:1537] [PG 4 Rank 1] Timeout at NCCL work: 27658, last enqueued NCCL work: 27658, last completed NCCL work: 27657.
+ [default7]:[rank7]:[E ProcessGroupNCCL.cpp:577] [Rank 1] Some NCCL operations have failed or timed out. Due to the asynchronous nature of CUDA kernels, subsequent GPU operations might run on corrupted/incomplete data.
+ [default7]:[rank7]:[E ProcessGroupNCCL.cpp:583] [Rank 1] To avoid data inconsistency, we are taking the entire process down.
+ [default7]:[rank7]:[E ProcessGroupNCCL.cpp:1414] [PG 4 Rank 1] Process group watchdog thread terminated with exception: [Rank 1] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=27658, OpType=RECV, NumelIn=7, NumelOut=7, Timeout(ms)=600000) ran for 600027 milliseconds before timing out.
+ [default7]:Exception raised from checkTimeout at ../torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp:565 (most recent call first):
+ [default7]:frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7f76be926897 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libc10.so)
+ [default7]:frame #1: c10d::ProcessGroupNCCL::WorkNCCL::checkTimeout(std::optional<std::chrono::duration<long, std::ratio<1l, 1000l> > >) + 0x1d2 (0x7f76bfbffc62 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
+ [default7]:frame #2: c10d::ProcessGroupNCCL::watchdogHandler() + 0x1a0 (0x7f76bfc04a80 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
+ [default7]:frame #3: c10d::ProcessGroupNCCL::ncclCommWatchdog() + 0x10c (0x7f76bfc05dcc in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
+ [default7]:frame #4: <unknown function> + 0xd3e95 (0x7f770b69ee95 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/../lib/libstdc++.so.6)
+ [default7]:frame #5: <unknown function> + 0x8609 (0x7f77106e5609 in /lib/x86_64-linux-gnu/libpthread.so.0)
+ [default7]:frame #6: clone + 0x43 (0x7f77104b0353 in /lib/x86_64-linux-gnu/libc.so.6)
+ [default7]:
+ [default7]:terminate called after throwing an instance of 'c10::DistBackendError'
+ [default7]: what(): [PG 4 Rank 1] Process group watchdog thread terminated with exception: [Rank 1] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=27658, OpType=RECV, NumelIn=7, NumelOut=7, Timeout(ms)=600000) ran for 600027 milliseconds before timing out.
+ [default7]:Exception raised from checkTimeout at ../torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp:565 (most recent call first):
+ [default7]:frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7f76be926897 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libc10.so)
+ [default7]:frame #1: c10d::ProcessGroupNCCL::WorkNCCL::checkTimeout(std::optional<std::chrono::duration<long, std::ratio<1l, 1000l> > >) + 0x1d2 (0x7f76bfbffc62 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
+ [default7]:frame #2: c10d::ProcessGroupNCCL::watchdogHandler() + 0x1a0 (0x7f76bfc04a80 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
+ [default7]:frame #3: c10d::ProcessGroupNCCL::ncclCommWatchdog() + 0x10c (0x7f76bfc05dcc in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
+ [default7]:frame #4: <unknown function> + 0xd3e95 (0x7f770b69ee95 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/../lib/libstdc++.so.6)
+ [default7]:frame #5: <unknown function> + 0x8609 (0x7f77106e5609 in /lib/x86_64-linux-gnu/libpthread.so.0)
+ [default7]:frame #6: clone + 0x43 (0x7f77104b0353 in /lib/x86_64-linux-gnu/libc.so.6)
+ [default7]:
+ [default7]:Exception raised from ncclCommWatchdog at ../torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp:1418 (most recent call first):
+ [default7]:frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7f76be926897 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libc10.so)
+ [default7]:frame #1: <unknown function> + 0xe32119 (0x7f76bf889119 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
+ [default7]:frame #2: <unknown function> + 0xd3e95 (0x7f770b69ee95 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/../lib/libstdc++.so.6)
+ [default7]:frame #3: <unknown function> + 0x8609 (0x7f77106e5609 in /lib/x86_64-linux-gnu/libpthread.so.0)
+ [default7]:frame #4: clone + 0x43 (0x7f77104b0353 in /lib/x86_64-linux-gnu/libc.so.6)
+ [default7]:
+ [default6]:[rank6]:[E ProcessGroupNCCL.cpp:1537] [PG 4 Rank 1] Timeout at NCCL work: 27658, last enqueued NCCL work: 27658, last completed NCCL work: 27657.
+ [default6]:[rank6]:[E ProcessGroupNCCL.cpp:577] [Rank 1] Some NCCL operations have failed or timed out. Due to the asynchronous nature of CUDA kernels, subsequent GPU operations might run on corrupted/incomplete data.
+ [default6]:[rank6]:[E ProcessGroupNCCL.cpp:583] [Rank 1] To avoid data inconsistency, we are taking the entire process down.
+ [default6]:[rank6]:[E ProcessGroupNCCL.cpp:1414] [PG 4 Rank 1] Process group watchdog thread terminated with exception: [Rank 1] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=27658, OpType=RECV, NumelIn=7, NumelOut=7, Timeout(ms)=600000) ran for 600071 milliseconds before timing out.
+ [default6]:Exception raised from checkTimeout at ../torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp:565 (most recent call first):
+ [default6]:frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7f521e3f8897 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libc10.so)
+ [default6]:frame #1: c10d::ProcessGroupNCCL::WorkNCCL::checkTimeout(std::optional<std::chrono::duration<long, std::ratio<1l, 1000l> > >) + 0x1d2 (0x7f521f6d1c62 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
+ [default6]:frame #2: c10d::ProcessGroupNCCL::watchdogHandler() + 0x1a0 (0x7f521f6d6a80 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
+ [default6]:frame #3: c10d::ProcessGroupNCCL::ncclCommWatchdog() + 0x10c (0x7f521f6d7dcc in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
+ [default6]:frame #4: <unknown function> + 0xd3e95 (0x7f526b170e95 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/../lib/libstdc++.so.6)
+ [default6]:frame #5: <unknown function> + 0x8609 (0x7f52701b7609 in /lib/x86_64-linux-gnu/libpthread.so.0)
+ [default6]:frame #6: clone + 0x43 (0x7f526ff82353 in /lib/x86_64-linux-gnu/libc.so.6)
+ [default6]:
+ [default6]:terminate called after throwing an instance of 'c10::DistBackendError'
+ [default6]: what(): [PG 4 Rank 1] Process group watchdog thread terminated with exception: [Rank 1] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=27658, OpType=RECV, NumelIn=7, NumelOut=7, Timeout(ms)=600000) ran for 600071 milliseconds before timing out.
+ [default6]:Exception raised from checkTimeout at ../torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp:565 (most recent call first):
+ [default6]:frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7f521e3f8897 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libc10.so)
+ [default6]:frame #1: c10d::ProcessGroupNCCL::WorkNCCL::checkTimeout(std::optional<std::chrono::duration<long, std::ratio<1l, 1000l> > >) + 0x1d2 (0x7f521f6d1c62 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
+ [default6]:frame #2: c10d::ProcessGroupNCCL::watchdogHandler() + 0x1a0 (0x7f521f6d6a80 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
+ [default6]:frame #3: c10d::ProcessGroupNCCL::ncclCommWatchdog() + 0x10c (0x7f521f6d7dcc in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
+ [default6]:frame #4: <unknown function> + 0xd3e95 (0x7f526b170e95 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/../lib/libstdc++.so.6)
+ [default6]:frame #5: <unknown function> + 0x8609 (0x7f52701b7609 in /lib/x86_64-linux-gnu/libpthread.so.0)
+ [default6]:frame #6: clone + 0x43 (0x7f526ff82353 in /lib/x86_64-linux-gnu/libc.so.6)
+ [default6]:
+ [default6]:Exception raised from ncclCommWatchdog at ../torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp:1418 (most recent call first):
+ [default6]:frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7f521e3f8897 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libc10.so)
+ [default6]:frame #1: <unknown function> + 0xe32119 (0x7f521f35b119 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
+ [default6]:frame #2: <unknown function> + 0xd3e95 (0x7f526b170e95 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/../lib/libstdc++.so.6)
+ [default6]:frame #3: <unknown function> + 0x8609 (0x7f52701b7609 in /lib/x86_64-linux-gnu/libpthread.so.0)
+ [default6]:frame #4: clone + 0x43 (0x7f526ff82353 in /lib/x86_64-linux-gnu/libc.so.6)
+ [default6]:
+ [default4]:[rank4]:[E ProcessGroupNCCL.cpp:1537] [PG 4 Rank 1] Timeout at NCCL work: 27658, last enqueued NCCL work: 27658, last completed NCCL work: 27657.
+ [default4]:[rank4]:[E ProcessGroupNCCL.cpp:577] [Rank 1] Some NCCL operations have failed or timed out. Due to the asynchronous nature of CUDA kernels, subsequent GPU operations might run on corrupted/incomplete data.
+ [default4]:[rank4]:[E ProcessGroupNCCL.cpp:583] [Rank 1] To avoid data inconsistency, we are taking the entire process down.
+ [default4]:[rank4]:[E ProcessGroupNCCL.cpp:1414] [PG 4 Rank 1] Process group watchdog thread terminated with exception: [Rank 1] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=27658, OpType=RECV, NumelIn=7, NumelOut=7, Timeout(ms)=600000) ran for 600036 milliseconds before timing out.
+ [default4]:Exception raised from checkTimeout at ../torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp:565 (most recent call first):
+ [default4]:frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7f34dc454897 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libc10.so)
+ [default4]:frame #1: c10d::ProcessGroupNCCL::WorkNCCL::checkTimeout(std::optional<std::chrono::duration<long, std::ratio<1l, 1000l> > >) + 0x1d2 (0x7f34dd72dc62 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
+ [default4]:frame #2: c10d::ProcessGroupNCCL::watchdogHandler() + 0x1a0 (0x7f34dd732a80 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
+ [default4]:frame #3: c10d::ProcessGroupNCCL::ncclCommWatchdog() + 0x10c (0x7f34dd733dcc in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
+ [default4]:frame #4: <unknown function> + 0xd3e95 (0x7f35291cce95 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/../lib/libstdc++.so.6)
+ [default4]:frame #5: <unknown function> + 0x8609 (0x7f352e213609 in /lib/x86_64-linux-gnu/libpthread.so.0)
+ [default4]:frame #6: clone + 0x43 (0x7f352dfde353 in /lib/x86_64-linux-gnu/libc.so.6)
+ [default4]:
+ [default4]:terminate called after throwing an instance of 'c10::DistBackendError'
+ [default4]: what(): [PG 4 Rank 1] Process group watchdog thread terminated with exception: [Rank 1] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=27658, OpType=RECV, NumelIn=7, NumelOut=7, Timeout(ms)=600000) ran for 600036 milliseconds before timing out.
+ [default4]:Exception raised from checkTimeout at ../torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp:565 (most recent call first):
+ [default4]:frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7f34dc454897 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libc10.so)
+ [default4]:frame #1: c10d::ProcessGroupNCCL::WorkNCCL::checkTimeout(std::optional<std::chrono::duration<long, std::ratio<1l, 1000l> > >) + 0x1d2 (0x7f34dd72dc62 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
+ [default4]:frame #2: c10d::ProcessGroupNCCL::watchdogHandler() + 0x1a0 (0x7f34dd732a80 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
+ [default4]:frame #3: c10d::ProcessGroupNCCL::ncclCommWatchdog() + 0x10c (0x7f34dd733dcc in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
+ [default4]:frame #4: <unknown function> + 0xd3e95 (0x7f35291cce95 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/../lib/libstdc++.so.6)
+ [default4]:frame #5: <unknown function> + 0x8609 (0x7f352e213609 in /lib/x86_64-linux-gnu/libpthread.so.0)
+ [default4]:frame #6: clone + 0x43 (0x7f352dfde353 in /lib/x86_64-linux-gnu/libc.so.6)
+ [default4]:
+ [default4]:Exception raised from ncclCommWatchdog at ../torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp:1418 (most recent call first):
+ [default4]:frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7f34dc454897 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libc10.so)
+ [default4]:frame #1: <unknown function> + 0xe32119 (0x7f34dd3b7119 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
+ [default4]:frame #2: <unknown function> + 0xd3e95 (0x7f35291cce95 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/../lib/libstdc++.so.6)
+ [default4]:frame #3: <unknown function> + 0x8609 (0x7f352e213609 in /lib/x86_64-linux-gnu/libpthread.so.0)
+ [default4]:frame #4: clone + 0x43 (0x7f352dfde353 in /lib/x86_64-linux-gnu/libc.so.6)
+ [default4]:
+ [default5]:[rank5]:[E ProcessGroupNCCL.cpp:1537] [PG 4 Rank 1] Timeout at NCCL work: 27658, last enqueued NCCL work: 27658, last completed NCCL work: 27657.
+ [default5]:[rank5]:[E ProcessGroupNCCL.cpp:577] [Rank 1] Some NCCL operations have failed or timed out. Due to the asynchronous nature of CUDA kernels, subsequent GPU operations might run on corrupted/incomplete data.
+ [default5]:[rank5]:[E ProcessGroupNCCL.cpp:583] [Rank 1] To avoid data inconsistency, we are taking the entire process down.
+ [default5]:[rank5]:[E ProcessGroupNCCL.cpp:1414] [PG 4 Rank 1] Process group watchdog thread terminated with exception: [Rank 1] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=27658, OpType=RECV, NumelIn=7, NumelOut=7, Timeout(ms)=600000) ran for 600003 milliseconds before timing out.
+ [default5]:Exception raised from checkTimeout at ../torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp:565 (most recent call first):
+ [default5]:frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7f7ea2b1e897 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libc10.so)
+ [default5]:frame #1: c10d::ProcessGroupNCCL::WorkNCCL::checkTimeout(std::optional<std::chrono::duration<long, std::ratio<1l, 1000l> > >) + 0x1d2 (0x7f7ea3df7c62 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
+ [default5]:frame #2: c10d::ProcessGroupNCCL::watchdogHandler() + 0x1a0 (0x7f7ea3dfca80 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
+ [default5]:frame #3: c10d::ProcessGroupNCCL::ncclCommWatchdog() + 0x10c (0x7f7ea3dfddcc in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
+ [default5]:frame #4: <unknown function> + 0xd3e95 (0x7f7eef896e95 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/../lib/libstdc++.so.6)
+ [default5]:frame #5: <unknown function> + 0x8609 (0x7f7ef48dd609 in /lib/x86_64-linux-gnu/libpthread.so.0)
+ [default5]:frame #6: clone + 0x43 (0x7f7ef46a8353 in /lib/x86_64-linux-gnu/libc.so.6)
+ [default5]:
+ [default5]:terminate called after throwing an instance of 'c10::DistBackendError'
+ [default5]: what(): [PG 4 Rank 1] Process group watchdog thread terminated with exception: [Rank 1] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=27658, OpType=RECV, NumelIn=7, NumelOut=7, Timeout(ms)=600000) ran for 600003 milliseconds before timing out.
+ [default5]:Exception raised from checkTimeout at ../torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp:565 (most recent call first):
+ [default5]:frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7f7ea2b1e897 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libc10.so)
+ [default5]:frame #1: c10d::ProcessGroupNCCL::WorkNCCL::checkTimeout(std::optional<std::chrono::duration<long, std::ratio<1l, 1000l> > >) + 0x1d2 (0x7f7ea3df7c62 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
+ [default5]:frame #2: c10d::ProcessGroupNCCL::watchdogHandler() + 0x1a0 (0x7f7ea3dfca80 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
+ [default5]:frame #3: c10d::ProcessGroupNCCL::ncclCommWatchdog() + 0x10c (0x7f7ea3dfddcc in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
+ [default5]:frame #4: <unknown function> + 0xd3e95 (0x7f7eef896e95 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/../lib/libstdc++.so.6)
+ [default5]:frame #5: <unknown function> + 0x8609 (0x7f7ef48dd609 in /lib/x86_64-linux-gnu/libpthread.so.0)
+ [default5]:frame #6: clone + 0x43 (0x7f7ef46a8353 in /lib/x86_64-linux-gnu/libc.so.6)
+ [default5]:
+ [default5]:Exception raised from ncclCommWatchdog at ../torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp:1418 (most recent call first):
+ [default5]:frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7f7ea2b1e897 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libc10.so)
+ [default5]:frame #1: <unknown function> + 0xe32119 (0x7f7ea3a81119 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
+ [default5]:frame #2: <unknown function> + 0xd3e95 (0x7f7eef896e95 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/../lib/libstdc++.so.6)
+ [default5]:frame #3: <unknown function> + 0x8609 (0x7f7ef48dd609 in /lib/x86_64-linux-gnu/libpthread.so.0)
+ [default5]:frame #4: clone + 0x43 (0x7f7ef46a8353 in /lib/x86_64-linux-gnu/libc.so.6)
+ [default5]:
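All four watchdog dumps report the same event: NCCL work item 27658, a RECV of 7 elements (the pipeline metadata tensor), waited out the full Timeout(ms)=600000 before the watchdog aborted the communicator and took each process down. When longer gaps between collectives are genuinely expected, the timeout can be raised where the process group is created; a sketch, assuming the standard torch.distributed initialization path:

    from datetime import timedelta
    import torch.distributed as dist

    # Raising the collective timeout only buys time; it does not fix
    # whatever stalled the sending pipeline stage in the first place.
    dist.init_process_group(
        backend="nccl",
        timeout=timedelta(minutes=30),  # this run ran with 600000 ms (10 min)
    )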
+ W0703 21:27:06.662000 139917486880576 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 219616 closing signal SIGTERM
+ W0703 21:27:06.662000 139917486880576 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 219617 closing signal SIGTERM
+ W0703 21:27:06.662000 139917486880576 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 219618 closing signal SIGTERM
+ W0703 21:27:06.664000 139917486880576 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 219619 closing signal SIGTERM
+ E0703 21:27:12.835000 139917486880576 torch/distributed/elastic/multiprocessing/api.py:826] failed (exitcode: -6) local_rank: 4 (pid: 219620) of binary: /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10
+ Traceback (most recent call last):
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/torchrun", line 8, in <module>
+ sys.exit(main())
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 347, in wrapper
+ return f(*args, **kwargs)
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/run.py", line 879, in main
+ run(args)
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/run.py", line 870, in run
+ elastic_launch(
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 132, in __call__
+ return launch_agent(self._config, self._entrypoint, list(args))
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 263, in launch_agent
+ raise ChildFailedError(
+ torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
+ ============================================================
+ /fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py FAILED
+ ------------------------------------------------------------
+ Failures:
+ [1]:
+ time : 2024-07-03_21:27:06
+ host : ip-26-0-174-36.ec2.internal
+ rank : 5 (local_rank: 5)
+ exitcode : -6 (pid: 219621)
+ error_file: <N/A>
+ traceback : Signal 6 (SIGABRT) received by PID 219621
+ [2]:
+ time : 2024-07-03_21:27:06
+ host : ip-26-0-174-36.ec2.internal
+ rank : 6 (local_rank: 6)
+ exitcode : -6 (pid: 219622)
+ error_file: <N/A>
+ traceback : Signal 6 (SIGABRT) received by PID 219622
+ [3]:
+ time : 2024-07-03_21:27:06
+ host : ip-26-0-174-36.ec2.internal
+ rank : 7 (local_rank: 7)
+ exitcode : -6 (pid: 219623)
+ error_file: <N/A>
+ traceback : Signal 6 (SIGABRT) received by PID 219623
+ ------------------------------------------------------------
+ Root Cause (first observed failure):
+ [0]:
+ time : 2024-07-03_21:27:06
+ host : ip-26-0-174-36.ec2.internal
+ rank : 4 (local_rank: 4)
+ exitcode : -6 (pid: 219620)
+ error_file: <N/A>
+ traceback : Signal 6 (SIGABRT) received by PID 219620
+ ============================================================
+ srun: error: ip-26-0-174-36: task 0: Exited with exit code 1
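Every failure record above shows error_file: <N/A>, so the elastic agent can only report the signal (SIGABRT), not the Python exception that preceded it. torch.distributed.elastic can persist the child's traceback to an error file that torchrun then surfaces, provided the entrypoint opts in; a sketch of how a script like run_train.py could do so (the main body is a placeholder):

    from torch.distributed.elastic.multiprocessing.errors import record

    @record  # writes the child's exception to an error file torchrun can report
    def main() -> None:
        ...  # build the trainer, then run trainer.train(dataloader)

    if __name__ == "__main__":
        main()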
+ Consider using `hf_transfer` for faster uploads. This solution comes with some limitations. See https://huggingface.co/docs/huggingface_hub/hf_transfer for more details.
llama-1B/8_GPUS/dp-1_tp-4_pp-2_mbz-2/status.txt ADDED
@@ -0,0 +1 @@
 
 
+ timeout
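The run is accordingly recorded as timeout rather than a crash or out-of-memory failure: it hung in a pipeline receive until the NCCL watchdog killed it. Below is a sketch of the kind of log-driven classification that could produce this status file; the patterns and their priority order are assumptions, not necessarily the benchmark harness's actual logic:

    from pathlib import Path

    def classify_run(log_path: str) -> str:
        # Assumed heuristic: key off the watchdog message visible in log.out.
        text = Path(log_path).read_text(errors="replace")
        if "Watchdog caught collective operation timeout" in text:
            return "timeout"
        if "srun: error" in text:
            return "fail"
        return "completed"

    Path("status.txt").write_text(classify_run("log.out"))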