3outeille HF staff committed on
Commit
0ed0f9f
1 Parent(s): 2348abf

Upload llama-1B/8_GPUS/dp-1_tp-1_pp-8_mbz-16

Browse files
llama-1B/8_GPUS/dp-1_tp-1_pp-8_mbz-16/bench.slurm ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+
3
+ #SBATCH --job-name=bench_cluster
4
+ #SBATCH --time=02:00:00
5
+ #SBATCH --partition=hopper-prod
6
+ #SBATCH --nodes=1
7
+ #SBATCH --gres=gpu:8
8
+ #SBATCH --qos=normal
9
+ #SBATCH --ntasks-per-node=1
10
+ #SBATCH --cpus-per-task=96
11
+ #SBATCH --exclusive
12
+ #SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-1_pp-8_mbz-16/log.out
13
+ #SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-1_pp-8_mbz-16/log.out
14
+
15
+ # Function to update status based on squeue output
16
+ update_status() {
17
+ job_id=$1
18
+ status_file=$2
19
+ # For unknown reasons, it doenst update status for pending. It only works for running
20
+ while true; do
21
+ job_status=$(squeue --job $job_id --noheader --format=%T)
22
+ echo "Job status: $job_status"
23
+ if [ -z "$job_status" ]; then
24
+ # Job has finished or is not found
25
+ break
26
+ elif [ "$job_status" = "RUNNING" ]; then
27
+ printf "running" > $status_file
28
+ break
29
+ fi
30
+ sleep 10
31
+ done
32
+ }
33
+
34
+ # Misc initializations.
35
+ echo "========================"
36
+ echo "START TIME: $(date)"
37
+ source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
38
+ conda activate /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster
39
+ echo python3 version = $(python3 --version)
40
+ echo "========================"
41
+
42
+ # Slurm stuff
43
+ export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
44
+ export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
45
+ export MASTER_PORT=$((1024 + RANDOM % 64511))
46
+
47
+ export TMPDIR=/scratch
48
+ export HF_DATASETS_CACHE="/admin/home/ferdinand_mom/.cache"
49
+ export CUBLAS_WORKSPACE_CONFIG=":4096:8"
50
+ export CUDA_DEVICE_MAX_CONNECTIONS="1"
51
+
52
+ huggingface-cli login --token $HUGGINGFACE_TOKEN
53
+
54
+
55
+ NANOTRON_REPO="/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron"
56
+ CMD="$NANOTRON_REPO/run_train.py --config-file /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-1_pp-8_mbz-16/config.yaml"
57
+
58
+ LAUNCHER="torchrun \
59
+ --nproc_per_node 8 \
60
+ --nnodes 1 \
61
+ --rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} \
62
+ --rdzv_backend c10d \
63
+ --max_restarts 0 \
64
+ --tee 3 \
65
+ --node_rank ${SLURM_PROCID}"
66
+
67
+ # Checkout the bench_cluster branch
68
+ cd $NANOTRON_REPO
69
+ git checkout bench_cluster
70
+ cd ..
71
+ # Get the current job ID
72
+ job_id=${SLURM_JOB_ID}
73
+
74
+ # Update status to "pending" or "running" in the background
75
+ update_status $job_id /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-1_pp-8_mbz-16/status.txt &
76
+
77
+ # Run the main command
78
+ srun -u $LAUNCHER $CMD
79
+ exit_status=$?
80
+
81
+ # Update status based on the exit status of `srun`
82
+ if [ $exit_status -eq 0 ]; then
83
+ printf "completed" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-1_pp-8_mbz-16/status.txt
84
+ else
85
+ if grep -q "OutOfMemoryError" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-1_pp-8_mbz-16/log.out; then
86
+ printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-1_pp-8_mbz-16/status.txt
87
+ elif grep -q " CUDA error: an illegal memory access" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-1_pp-8_mbz-16/log.out; then
88
+ printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-1_pp-8_mbz-16/status.txt
89
+ elif grep -q "Timeout at NCCL" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-1_pp-8_mbz-16/log.out; then
90
+ printf "timeout" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-1_pp-8_mbz-16/status.txt
91
+ else
92
+ printf "fail" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-1_pp-8_mbz-16/status.txt
93
+ fi
94
+ fi
95
+
96
+ # Run the report script if the job completed successfully
97
+ if [ $exit_status -eq 0 ]; then
98
+ python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-1_pp-8_mbz-16 --is_logs
99
+ python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-1_pp-8_mbz-16 --is_profiler
100
+ fi
101
+
102
+
103
+ # Push to hub the folder using huggingface_cli
104
+ huggingface-cli upload nanotron/bench_cluster /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-1_pp-8_mbz-16 llama-1B/8_GPUS/dp-1_tp-1_pp-8_mbz-16 --commit-message "Upload llama-1B/8_GPUS/dp-1_tp-1_pp-8_mbz-16"
105
+
106
+ # Verify the upload
107
+ if [ $? -eq 0 ]; then
108
+ echo "Uploading to Huggingface Hub successful"
109
+ else
110
+ echo "Failed to upload to Huggingface Hub"
111
+ fi
llama-1B/8_GPUS/dp-1_tp-1_pp-8_mbz-16/config.yaml ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ general:
2
+ project: bench_cluster
3
+ seed: 42
4
+ model:
5
+ ddp_bucket_cap_mb: 25
6
+ dtype: bfloat16
7
+ init_method:
8
+ std: 0.025
9
+ make_vocab_size_divisible_by: 1
10
+ model_config:
11
+ bos_token_id: 1
12
+ eos_token_id: 2
13
+ hidden_act: silu
14
+ hidden_size: 2048
15
+ initializer_range: 0.02
16
+ intermediate_size: 4096
17
+ is_llama_config: true
18
+ max_position_embeddings: 4096
19
+ num_attention_heads: 32
20
+ num_hidden_layers: 24
21
+ num_key_value_heads: 32
22
+ pad_token_id: null
23
+ pretraining_tp: 1
24
+ rms_norm_eps: 1.0e-05
25
+ rope_scaling: null
26
+ rope_theta: 10000.0
27
+ tie_word_embeddings: true
28
+ use_cache: true
29
+ vocab_size: 50257
30
+ optimizer:
31
+ accumulate_grad_in_fp32: true
32
+ clip_grad: 1.0
33
+ learning_rate_scheduler:
34
+ learning_rate: 0.0001
35
+ lr_decay_style: linear
36
+ lr_warmup_style: linear
37
+ lr_warmup_steps: 1
38
+ min_decay_lr: 1.0e-05
39
+ optimizer_factory:
40
+ adam_beta1: 0.9
41
+ adam_beta2: 0.95
42
+ adam_eps: 1.0e-08
43
+ name: adamW
44
+ torch_adam_is_fused: true
45
+ weight_decay: 0.01
46
+ zero_stage: 1
47
+ parallelism:
48
+ dp: 1
49
+ expert_parallel_size: 1
50
+ pp: 8
51
+ pp_engine: 1f1b
52
+ tp: 1
53
+ tp_linear_async_communication: false
54
+ tp_mode: REDUCE_SCATTER
55
+ profiler:
56
+ profiler_export_path: /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-1_pp-8_mbz-16
57
+ tokenizer:
58
+ tokenizer_max_length: null
59
+ tokenizer_name_or_path: openai-community/gpt2
60
+ tokenizer_revision: null
61
+ data_stages:
62
+ - name: Training Stage
63
+ start_training_step: 1
64
+ data:
65
+ dataset:
66
+ dataset_overwrite_cache: false
67
+ dataset_processing_num_proc_per_process: 64
68
+ hf_dataset_config_name: null
69
+ hf_dataset_or_datasets: roneneldan/TinyStories
70
+ hf_dataset_splits: train
71
+ text_column_name: text
72
+ num_loading_workers: 0
73
+ seed: 42
74
+ lighteval: null
75
+ tokens:
76
+ train_steps: 20
77
+ val_check_interval: -1
78
+ batch_accumulation_per_replica: 64
79
+ limit_test_batches: 0
80
+ limit_val_batches: 0
81
+ micro_batch_size: 16
82
+ sequence_length: 4096
83
+ logging:
84
+ iteration_step_info_interval: 1
85
+ log_level: info
86
+ log_level_replica: info
87
+ checkpoints:
88
+ checkpoint_interval: 100000
89
+ checkpoints_path: /dev/null
90
+ resume_checkpoint_path: null
llama-1B/8_GPUS/dp-1_tp-1_pp-8_mbz-16/log.out ADDED
@@ -0,0 +1,626 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ========================
2
+ START TIME: Wed Jul 3 23:09:08 UTC 2024
3
+ python3 version = Python 3.10.14
4
+ ========================
5
+ The token has not been saved to the git credentials helper. Pass `add_to_git_credential=True` in this function directly or `--add-to-git-credential` if using via `huggingface-cli` if you want to set the git credential as well.
6
+ Token is valid (permission: write).
7
+ Your token has been saved to /admin/home/ferdinand_mom/.cache/huggingface/token
8
+ Login successful
9
+ Already on 'bench_cluster'
10
+ M examples/config_tiny_llama.py
11
+ M examples/config_tiny_llama.yaml
12
+ M examples/train_tiny_llama.sh
13
+ M src/nanotron/models/llama.py
14
+ M src/nanotron/trainer.py
15
+ Your branch is up to date with 'origin/bench_cluster'.
16
+ Job status: RUNNING
17
+ W0703 23:09:14.260000 140032670197568 torch/distributed/run.py:757]
18
+ W0703 23:09:14.260000 140032670197568 torch/distributed/run.py:757] *****************************************
19
+ W0703 23:09:14.260000 140032670197568 torch/distributed/run.py:757] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
20
+ W0703 23:09:14.260000 140032670197568 torch/distributed/run.py:757] *****************************************
21
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Config:
22
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Config(general=GeneralArgs(project='bench_cluster',
23
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: run='%date_%jobid',
24
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: seed=42,
25
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: step=None,
26
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: consumed_train_samples=None,
27
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: benchmark_csv_path=None,
28
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: ignore_sanity_checks=True),
29
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: parallelism=ParallelismArgs(dp=1,
30
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: pp=8,
31
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: tp=1,
32
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: pp_engine=<nanotron.parallel.pipeline_parallel.engine.OneForwardOneBackwardPipelineEngine object at 0x7f312b8a0880>,
33
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: tp_mode=<TensorParallelLinearMode.REDUCE_SCATTER: 2>,
34
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: tp_linear_async_communication=False,
35
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: expert_parallel_size=1),
36
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: model=ModelArgs(model_config=LlamaConfig(bos_token_id=1,
37
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: eos_token_id=2,
38
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: hidden_act='silu',
39
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: hidden_size=2048,
40
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: initializer_range=0.02,
41
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: intermediate_size=4096,
42
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: is_llama_config=True,
43
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: max_position_embeddings=4096,
44
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: num_attention_heads=32,
45
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: num_hidden_layers=24,
46
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: num_key_value_heads=32,
47
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: pad_token_id=None,
48
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: pretraining_tp=1,
49
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: rms_norm_eps=1e-05,
50
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: rope_scaling=None,
51
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: rope_theta=10000.0,
52
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: tie_word_embeddings=True,
53
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: use_cache=True,
54
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: vocab_size=50257),
55
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: init_method=RandomInit(std=0.025),
56
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: dtype=torch.bfloat16,
57
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: make_vocab_size_divisible_by=1,
58
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: ddp_bucket_cap_mb=25),
59
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: tokenizer=TokenizerArgs(tokenizer_name_or_path='openai-community/gpt2',
60
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: tokenizer_revision=None,
61
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: tokenizer_max_length=None),
62
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: checkpoints=CheckpointsArgs(checkpoints_path=Path('/dev/null'),
63
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: checkpoint_interval=100000,
64
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: save_initial_state=False,
65
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: resume_checkpoint_path=None,
66
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: checkpoints_path_is_shared_file_system=False),
67
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: logging=LoggingArgs(log_level='info',
68
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: log_level_replica='info',
69
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: iteration_step_info_interval=1),
70
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: tokens=TokensArgs(sequence_length=4096,
71
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: train_steps=20,
72
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: micro_batch_size=16,
73
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: batch_accumulation_per_replica=64,
74
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: val_check_interval=-1,
75
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: limit_val_batches=0,
76
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: limit_test_batches=0),
77
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: optimizer=OptimizerArgs(optimizer_factory=AdamWOptimizerArgs(adam_eps=1e-08,
78
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: adam_beta1=0.9,
79
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: adam_beta2=0.95,
80
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: torch_adam_is_fused=True,
81
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: name='adamW'),
82
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: zero_stage=1,
83
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: weight_decay=0.01,
84
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: clip_grad=1.0,
85
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: accumulate_grad_in_fp32=True,
86
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: learning_rate_scheduler=LRSchedulerArgs(learning_rate=0.0001,
87
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: lr_warmup_steps=1,
88
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: lr_warmup_style='linear',
89
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: lr_decay_style='linear',
90
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: lr_decay_steps=19,
91
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: lr_decay_starting_step=None,
92
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: min_decay_lr=1e-05)),
93
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: data_stages=[DatasetStageArgs(name='Training Stage',
94
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: start_training_step=1,
95
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: data=DataArgs(dataset=PretrainDatasetsArgs(hf_dataset_or_datasets='roneneldan/TinyStories',
96
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: hf_dataset_splits='train',
97
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: hf_dataset_config_name=None,
98
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: dataset_processing_num_proc_per_process=64,
99
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: dataset_overwrite_cache=False,
100
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: text_column_name='text'),
101
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: seed=42,
102
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: num_loading_workers=0))],
103
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: profiler=ProfilerArgs(profiler_export_path=Path('/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-1_pp-8_mbz-16')),
104
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: lighteval=None)
105
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Model Config:
106
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: LlamaConfig(bos_token_id=1,
107
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: eos_token_id=2,
108
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: hidden_act='silu',
109
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: hidden_size=2048,
110
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: initializer_range=0.02,
111
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: intermediate_size=4096,
112
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: is_llama_config=True,
113
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: max_position_embeddings=4096,
114
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: num_attention_heads=32,
115
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: num_hidden_layers=24,
116
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: num_key_value_heads=32,
117
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: pad_token_id=None,
118
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: pretraining_tp=1,
119
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: rms_norm_eps=1e-05,
120
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: rope_scaling=None,
121
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: rope_theta=10000.0,
122
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: tie_word_embeddings=True,
123
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: use_cache=True,
124
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: vocab_size=50257)
125
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Building model..
126
+ [default0]:07/03/2024 23:09:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Setting PP block ranks...
127
+ [default4]:07/03/2024 23:09:50 [INFO|DP=0|PP=4|TP=0|ip-26-0-160-225]: Local number of parameters: 126M (240.02MiB)
128
+ [default4]:07/03/2024 23:09:50 [INFO|DP=0|PP=4|TP=0|ip-26-0-160-225]: [After model building] Memory usage: 243.03MiB. Peak allocated: 245.06MiB Peak reserved: 262.00MiB
129
+ [default4]:07/03/2024 23:09:50 [INFO|DP=0|PP=4|TP=0|ip-26-0-160-225]: No checkpoint path provided.
130
+ [default6]:07/03/2024 23:09:50 [INFO|DP=0|PP=6|TP=0|ip-26-0-160-225]: Local number of parameters: 168M (320.03MiB)
131
+ [default6]:07/03/2024 23:09:50 [INFO|DP=0|PP=6|TP=0|ip-26-0-160-225]: [After model building] Memory usage: 324.04MiB. Peak allocated: 326.07MiB Peak reserved: 336.00MiB
132
+ [default6]:07/03/2024 23:09:50 [INFO|DP=0|PP=6|TP=0|ip-26-0-160-225]: No checkpoint path provided.
133
+ [default2]:07/03/2024 23:09:50 [INFO|DP=0|PP=2|TP=0|ip-26-0-160-225]: Local number of parameters: 126M (240.02MiB)
134
+ [default2]:07/03/2024 23:09:50 [INFO|DP=0|PP=2|TP=0|ip-26-0-160-225]: [After model building] Memory usage: 243.03MiB. Peak allocated: 245.06MiB Peak reserved: 262.00MiB
135
+ [default2]:07/03/2024 23:09:50 [INFO|DP=0|PP=2|TP=0|ip-26-0-160-225]: No checkpoint path provided.
136
+ [default5]:07/03/2024 23:09:50 [INFO|DP=0|PP=5|TP=0|ip-26-0-160-225]: Local number of parameters: 126M (240.02MiB)
137
+ [default5]:07/03/2024 23:09:50 [INFO|DP=0|PP=5|TP=0|ip-26-0-160-225]: [After model building] Memory usage: 243.03MiB. Peak allocated: 245.06MiB Peak reserved: 262.00MiB
138
+ [default5]:07/03/2024 23:09:50 [INFO|DP=0|PP=5|TP=0|ip-26-0-160-225]: No checkpoint path provided.
139
+ [default1]:07/03/2024 23:09:50 [INFO|DP=0|PP=1|TP=0|ip-26-0-160-225]: Local number of parameters: 126M (240.02MiB)
140
+ [default1]:07/03/2024 23:09:50 [INFO|DP=0|PP=1|TP=0|ip-26-0-160-225]: [After model building] Memory usage: 243.03MiB. Peak allocated: 245.06MiB Peak reserved: 262.00MiB
141
+ [default1]:07/03/2024 23:09:50 [INFO|DP=0|PP=1|TP=0|ip-26-0-160-225]: No checkpoint path provided.
142
+ [default0]:07/03/2024 23:09:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Total number of parameters: 1.21G (2312.82MiB)
143
+ [default0]:07/03/2024 23:09:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Local number of parameters: 271M (516.35MiB)
144
+ [default0]:07/03/2024 23:09:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [After model building] Memory usage: 520.36MiB. Peak allocated: 522.39MiB Peak reserved: 534.00MiB
145
+ [default0]:07/03/2024 23:09:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: No checkpoint path provided.
146
+ [default0]:07/03/2024 23:09:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Parametrizing model parameters using StandardParametrizator
147
+ [default3]:07/03/2024 23:09:50 [INFO|DP=0|PP=3|TP=0|ip-26-0-160-225]: Local number of parameters: 168M (320.03MiB)
148
+ [default3]:07/03/2024 23:09:50 [INFO|DP=0|PP=3|TP=0|ip-26-0-160-225]: [After model building] Memory usage: 324.04MiB. Peak allocated: 326.07MiB Peak reserved: 336.00MiB
149
+ [default3]:07/03/2024 23:09:50 [INFO|DP=0|PP=3|TP=0|ip-26-0-160-225]: No checkpoint path provided.
150
+ [default7]:07/03/2024 23:09:50 [INFO|DP=0|PP=7|TP=0|ip-26-0-160-225]: Local number of parameters: 103M (196.32MiB)
151
+ [default7]:07/03/2024 23:09:50 [INFO|DP=0|PP=7|TP=0|ip-26-0-160-225]: [After model building] Memory usage: 196.33MiB. Peak allocated: 196.33MiB Peak reserved: 200.00MiB
152
+ [default7]:07/03/2024 23:09:50 [INFO|DP=0|PP=7|TP=0|ip-26-0-160-225]: No checkpoint path provided.
153
+ [default0]:07/03/2024 23:09:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [Optimizer Building] Using LearningRateForSP as learning rate
154
+ [default0]:07/03/2024 23:09:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [ZeRO sharding] Size of optimizer params per rank:
155
+ [default0]:07/03/2024 23:09:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [ZeRO sharding] DP Rank 0 has 271M out of 271M (100.00%) params' optimizer states
156
+ [default0]:07/03/2024 23:09:51 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [Training Plan] Stage Training Stage has 19 remaining training steps and has consumed 0 samples
157
+ [default0]:07/03/2024 23:09:51 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Using `datasets` library
158
+ [default0]:07/03/2024 23:09:51 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Loading tokenizer from openai-community/gpt2 and transformers/hf_hub versions ('4.41.2', '0.23.4')
159
+ [default0]:07/03/2024 23:09:52 [WARNING|DP=0|PP=0|TP=0|ip-26-0-160-225]: Repo card metadata block was not found. Setting CardData to empty.
160
+ [default0]:Repo card metadata block was not found. Setting CardData to empty.
161
+ [default0]:07/03/2024 23:09:54 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [Training Plan] There are 1 training stages
162
+ [default0]:07/03/2024 23:09:54 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [Stage Training Stage] start from step 1
163
+ [default0]:07/03/2024 23:09:54 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]:
164
+ [default0]:07/03/2024 23:09:54 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [Start training] datetime: 2024-07-03 23:09:54.875055 | mbs: 16 | grad_accum: 64 | global_batch_size: 1024 | sequence_length: 4096 | train_steps: 20 | start_iteration_step: 0 | consumed_train_samples: 0
165
+ [default0]:07/03/2024 23:09:54 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Resuming training from stage Training Stage, it has trained for 0 samples and has 19 remaining train steps
166
+ [default0]:07/03/2024 23:09:54 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Memory usage: 2585.75MiB. Peak allocated 2585.75MiB. Peak reserved: 2602.00MiB
167
+ [default2]:Repo card metadata block was not found. Setting CardData to empty.
168
+ [default7]:Repo card metadata block was not found. Setting CardData to empty.
169
+ [default5]:07/03/2024 23:09:55 [WARNING|DP=0|PP=5|TP=0|ip-26-0-160-225]: Repo card metadata block was not found. Setting CardData to empty.
170
+ [default6]:07/03/2024 23:09:55 [WARNING|DP=0|PP=6|TP=0|ip-26-0-160-225]: Repo card metadata block was not found. Setting CardData to empty.
171
+ [default7]:07/03/2024 23:09:55 [WARNING|DP=0|PP=7|TP=0|ip-26-0-160-225]: Repo card metadata block was not found. Setting CardData to empty.
172
+ [default2]:07/03/2024 23:09:55 [WARNING|DP=0|PP=2|TP=0|ip-26-0-160-225]: Repo card metadata block was not found. Setting CardData to empty.
173
+ [default3]:07/03/2024 23:09:55 [WARNING|DP=0|PP=3|TP=0|ip-26-0-160-225]: Repo card metadata block was not found. Setting CardData to empty.
174
+ [default3]:Repo card metadata block was not found. Setting CardData to empty.
175
+ [default6]:Repo card metadata block was not found. Setting CardData to empty.
176
+ [default5]:Repo card metadata block was not found. Setting CardData to empty.
177
+ [default6]:07/03/2024 23:10:05 [WARNING|DP=0|PP=6|TP=0|ip-26-0-160-225]: Using the latest cached version of the dataset since roneneldan/TinyStories couldn't be found on the Hugging Face Hub
178
+ [default6]:07/03/2024 23:10:05 [WARNING|DP=0|PP=6|TP=0|ip-26-0-160-225]: Found the latest cached dataset configuration 'default' at /admin/home/ferdinand_mom/.cache/roneneldan___tiny_stories/default/0.0.0/691b0d9bd48ade766778c940011ca1c549f6359b (last modified on Mon Jun 24 07:59:52 2024).
179
+ [default6]:Using the latest cached version of the dataset since roneneldan/TinyStories couldn't be found on the Hugging Face Hub
180
+ [default6]:Found the latest cached dataset configuration 'default' at /admin/home/ferdinand_mom/.cache/roneneldan___tiny_stories/default/0.0.0/691b0d9bd48ade766778c940011ca1c549f6359b (last modified on Mon Jun 24 07:59:52 2024).
181
+ [default4]:07/03/2024 23:10:19 [WARNING|DP=0|PP=4|TP=0|ip-26-0-160-225]: Repo card metadata block was not found. Setting CardData to empty.
182
+ [default4]:Repo card metadata block was not found. Setting CardData to empty.
183
+ [default1]:07/03/2024 23:10:37 [WARNING|DP=0|PP=1|TP=0|ip-26-0-160-225]: Repo card metadata block was not found. Setting CardData to empty.
184
+ [default1]:Repo card metadata block was not found. Setting CardData to empty.
185
+ [default0]:[rank0]: Traceback (most recent call last):
186
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
187
+ [default0]:[rank0]: trainer.train(dataloader)
188
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
189
+ [default0]:[rank0]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
190
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
191
+ [default0]:[rank0]: outputs = self.pipeline_engine.train_batch_iter(
192
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 252, in train_batch_iter
193
+ [default0]:[rank0]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
194
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
195
+ [default0]:[rank0]: output = model(**micro_batch)
196
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
197
+ [default0]:[rank0]: return self._call_impl(*args, **kwargs)
198
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
199
+ [default0]:[rank0]: return forward_call(*args, **kwargs)
200
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
201
+ [default0]:[rank0]: sharded_logits = self.model(
202
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
203
+ [default0]:[rank0]: return self._call_impl(*args, **kwargs)
204
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
205
+ [default0]:[rank0]: return forward_call(*args, **kwargs)
206
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
207
+ [default0]:[rank0]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
208
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
209
+ [default0]:[rank0]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
210
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
211
+ [default0]:[rank0]: return self._call_impl(*args, **kwargs)
212
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
213
+ [default0]:[rank0]: return forward_call(*args, **kwargs)
214
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
215
+ [default0]:[rank0]: output = self.pp_block(**new_kwargs)
216
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
217
+ [default0]:[rank0]: return self._call_impl(*args, **kwargs)
218
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
219
+ [default0]:[rank0]: return forward_call(*args, **kwargs)
220
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 631, in forward
221
+ [default0]:[rank0]: output = self.attn(hidden_states=hidden_states, sequence_mask=sequence_mask)
222
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
223
+ [default0]:[rank0]: return self._call_impl(*args, **kwargs)
224
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
225
+ [default0]:[rank0]: return forward_call(*args, **kwargs)
226
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 389, in forward
227
+ [default0]:[rank0]: .contiguous()
228
+ [default0]:[rank0]: torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 768.00 MiB. GPU
229
+ [default5]:[rank5]: Traceback (most recent call last):
230
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
231
+ [default5]:[rank5]: trainer.train(dataloader)
232
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
233
+ [default5]:[rank5]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
234
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
235
+ [default5]:[rank5]: outputs = self.pipeline_engine.train_batch_iter(
236
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 252, in train_batch_iter
237
+ [default5]:[rank5]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
238
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
239
+ [default5]:[rank5]: output = model(**micro_batch)
240
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
241
+ [default5]:[rank5]: return self._call_impl(*args, **kwargs)
242
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
243
+ [default5]:[rank5]: return forward_call(*args, **kwargs)
244
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
245
+ [default5]:[rank5]: sharded_logits = self.model(
246
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
247
+ [default5]:[rank5]: return self._call_impl(*args, **kwargs)
248
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
249
+ [default5]:[rank5]: return forward_call(*args, **kwargs)
250
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
251
+ [default5]:[rank5]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
252
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
253
+ [default5]:[rank5]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
254
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
255
+ [default5]:[rank5]: return self._call_impl(*args, **kwargs)
256
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
257
+ [default5]:[rank5]: return forward_call(*args, **kwargs)
258
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 126, in forward
259
+ [default5]:[rank5]: new_kwargs[name] = recv_from_pipeline_state_buffer(
260
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/functional.py", line 117, in recv_from_pipeline_state_buffer
261
+ [default5]:[rank5]: pipeline_state.run_communication()
262
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/state.py", line 150, in run_communication
263
+ [default5]:[rank5]: recv_activation_tensor = recv_activation()
264
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/state.py", line 31, in __call__
265
+ [default5]:[rank5]: return self.p2p.recv_tensors(num_tensors=1, from_rank=self.from_rank)[0]
266
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/p2p.py", line 353, in recv_tensors
267
+ [default5]:[rank5]: buffers, futures = self.irecv_tensors(num_tensors=num_tensors, from_rank=from_rank, tag=tag)
268
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/p2p.py", line 326, in irecv_tensors
269
+ [default5]:[rank5]: meta = self._recv_meta(from_rank=from_rank, tag=tag)
270
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/p2p.py", line 246, in _recv_meta
271
+ [default5]:[rank5]: dist.recv(
272
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/c10d_logger.py", line 75, in wrapper
273
+ [default5]:[rank5]: return func(*args, **kwargs)
274
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py", line 1932, in recv
275
+ [default5]:[rank5]: pg.recv([tensor], group_src_rank, tag).wait()
276
+ [default5]:[rank5]: torch.distributed.DistBackendError: [5] is setting up NCCL communicator and retrieving ncclUniqueId from [0] via c10d key-value store by key '4:5', but store->get('4:5') got error: Connection reset by peer
277
+ [default5]:[rank5]: Exception raised from recvBytes at ../torch/csrc/distributed/c10d/Utils.hpp:672 (most recent call first):
278
+ [default5]:[rank5]: frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7f22de669897 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libc10.so)
279
+ [default5]:[rank5]: frame #1: <unknown function> + 0x5b3a23e (0x7f231818623e in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
280
+ [default5]:[rank5]: frame #2: c10d::TCPStore::doWait(c10::ArrayRef<std::string>, std::chrono::duration<long, std::ratio<1l, 1000l> >) + 0x2c7 (0x7f2318180c87 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
281
+ [default5]:[rank5]: frame #3: c10d::TCPStore::doGet(std::string const&) + 0x32 (0x7f2318180f82 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
282
+ [default5]:[rank5]: frame #4: c10d::TCPStore::get(std::string const&) + 0xa1 (0x7f2318181fd1 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
283
+ [default5]:[rank5]: frame #5: c10d::PrefixStore::get(std::string const&) + 0x31 (0x7f2318136371 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
284
+ [default5]:[rank5]: frame #6: c10d::PrefixStore::get(std::string const&) + 0x31 (0x7f2318136371 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
285
+ [default5]:[rank5]: frame #7: c10d::PrefixStore::get(std::string const&) + 0x31 (0x7f2318136371 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
286
+ [default5]:[rank5]: frame #8: c10d::PrefixStore::get(std::string const&) + 0x31 (0x7f2318136371 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
287
+ [default5]:[rank5]: frame #9: c10d::ProcessGroupNCCL::broadcastUniqueNCCLID(ncclUniqueId*, bool, std::string const&, int) + 0xa9 (0x7f22df943189 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
288
+ [default5]:[rank5]: frame #10: c10d::ProcessGroupNCCL::getNCCLComm(std::string const&, c10::Device&, c10d::OpType, int, bool) + 0xc50 (0x7f22df94a610 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
289
+ [default5]:[rank5]: frame #11: c10d::ProcessGroupNCCL::recv(std::vector<at::Tensor, std::allocator<at::Tensor> >&, int, int) + 0x5f8 (0x7f22df969978 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
290
+ [default5]:[rank5]: frame #12: <unknown function> + 0x5adc309 (0x7f2318128309 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
291
+ [default5]:[rank5]: frame #13: <unknown function> + 0x5ae6f10 (0x7f2318132f10 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
292
+ [default5]:[rank5]: frame #14: <unknown function> + 0x5ae6fa5 (0x7f2318132fa5 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
293
+ [default5]:[rank5]: frame #15: <unknown function> + 0x5124446 (0x7f2317770446 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
294
+ [default5]:[rank5]: frame #16: <unknown function> + 0x1acf4b8 (0x7f231411b4b8 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
295
+ [default5]:[rank5]: frame #17: <unknown function> + 0x5aee004 (0x7f231813a004 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
296
+ [default5]:[rank5]: frame #18: <unknown function> + 0x5af36b5 (0x7f231813f6b5 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
297
+ [default5]:[rank5]: frame #19: <unknown function> + 0xd2631e (0x7f232ad2931e in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_python.so)
298
+ [default5]:[rank5]: frame #20: <unknown function> + 0x47def4 (0x7f232a480ef4 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_python.so)
299
+ [default5]:[rank5]: frame #21: <unknown function> + 0x1445a6 (0x55cf8973b5a6 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
300
+ [default5]:[rank5]: frame #22: _PyObject_MakeTpCall + 0x26b (0x55cf89734a6b in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
301
+ [default5]:[rank5]: frame #23: <unknown function> + 0x150866 (0x55cf89747866 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
302
+ [default5]:[rank5]: frame #24: _PyEval_EvalFrameDefault + 0x4c12 (0x55cf89730142 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
303
+ [default5]:[rank5]: frame #25: _PyFunction_Vectorcall + 0x6c (0x55cf8973ba2c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
304
+ [default5]:[rank5]: frame #26: PyObject_Call + 0xbc (0x55cf89747f1c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
305
+ [default5]:[rank5]: frame #27: _PyEval_EvalFrameDefault + 0x2d83 (0x55cf8972e2b3 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
306
+ [default5]:[rank5]: frame #28: _PyFunction_Vectorcall + 0x6c (0x55cf8973ba2c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
307
+ [default5]:[rank5]: frame #29: _PyEval_EvalFrameDefault + 0x13ca (0x55cf8972c8fa in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
308
+ [default5]:[rank5]: frame #30: <unknown function> + 0x150582 (0x55cf89747582 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
309
+ [default5]:[rank5]: frame #31: _PyEval_EvalFrameDefault + 0x13ca (0x55cf8972c8fa in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
310
+ [default5]:[rank5]: frame #32: <unknown function> + 0x150582 (0x55cf89747582 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
311
+ [default5]:[rank5]: frame #33: _PyEval_EvalFrameDefault + 0x13ca (0x55cf8972c8fa in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
312
+ [default5]:[rank5]: frame #34: <unknown function> + 0x150582 (0x55cf89747582 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
313
+ [default5]:[rank5]: frame #35: _PyEval_EvalFrameDefault + 0x13ca (0x55cf8972c8fa in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
314
+ [default5]:[rank5]: frame #36: _PyObject_FastCallDictTstate + 0xd0 (0x55cf89733f50 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
315
+ [default5]:[rank5]: frame #37: _PyObject_Call_Prepend + 0x69 (0x55cf89745c39 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
316
+ [default5]:[rank5]: frame #38: <unknown function> + 0x211239 (0x55cf89808239 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
317
+ [default5]:[rank5]: frame #39: _PyObject_MakeTpCall + 0x26b (0x55cf89734a6b in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
318
+ [default5]:[rank5]: frame #40: _PyEval_EvalFrameDefault + 0x4eb6 (0x55cf897303e6 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
319
+ [default5]:[rank5]: frame #41: _PyFunction_Vectorcall + 0x6c (0x55cf8973ba2c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
320
+ [default5]:[rank5]: frame #42: _PyEval_EvalFrameDefault + 0x72c (0x55cf8972bc5c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
321
+ [default5]:[rank5]: frame #43: _PyFunction_Vectorcall + 0x6c (0x55cf8973ba2c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
322
+ [default6]:[rank6]: Traceback (most recent call last):
323
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
324
+ [default5]:[rank5]: frame #44: _PyEval_EvalFrameDefault + 0x13ca (0x55cf8972c8fa in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
325
+ [default6]:[rank6]: trainer.train(dataloader)
326
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
327
+ [default6]:[rank6]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
328
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
329
+ [default6]:[rank6]: outputs = self.pipeline_engine.train_batch_iter(
330
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 252, in train_batch_iter
331
+ [default5]:[rank5]: frame #45: <unknown function> + 0x150582 (0x55cf89747582 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
332
+ [default6]:[rank6]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
333
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
334
+ [default5]:[rank5]: frame #46: PyObject_Call + 0xbc (0x55cf89747f1c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
335
+ [default6]:[rank6]: output = model(**micro_batch)
336
+ [default5]:[rank5]: frame #47: _PyEval_EvalFrameDefault + 0x2d83 (0x55cf8972e2b3 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
337
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
338
+ [default6]:[rank6]: return self._call_impl(*args, **kwargs)
339
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
340
+ [default6]:[rank6]: return forward_call(*args, **kwargs)
341
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
342
+ [default5]:[rank5]: frame #48: <unknown function> + 0x150582 (0x55cf89747582 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
343
+ [default5]:[rank5]: frame #49: PyObject_Call + 0xbc (0x55cf89747f1c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
344
+ [default6]:[rank6]: sharded_logits = self.model(
345
+ [default5]:[rank5]: frame #50: _PyEval_EvalFrameDefault + 0x2d83 (0x55cf8972e2b3 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
346
+ [default5]:[rank5]: frame #51: _PyFunction_Vectorcall + 0x6c (0x55cf8973ba2c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
347
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
348
+ [default6]:[rank6]: return self._call_impl(*args, **kwargs)
349
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
350
+ [default6]:[rank6]: return forward_call(*args, **kwargs)
351
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
352
+ [default5]:[rank5]: frame #52: _PyObject_FastCallDictTstate + 0x187 (0x55cf89734007 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
353
+ [default6]:[rank6]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
354
+ [default5]:[rank5]: frame #53: _PyObject_Call_Prepend + 0x69 (0x55cf89745c39 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
355
+ [default5]:[rank5]: frame #54: <unknown function> + 0x211239 (0x55cf89808239 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
356
+ [default5]:[rank5]: frame #55: PyObject_Call + 0x207 (0x55cf89748067 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
357
+ [default5]:[rank5]: frame #56: _PyEval_EvalFrameDefault + 0x2d83 (0x55cf8972e2b3 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
358
+ [default5]:[rank5]: frame #57: <unknown function> + 0x150582 (0x55cf89747582 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
359
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
360
+ [default6]:[rank6]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
361
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
362
+ [default6]:[rank6]: return self._call_impl(*args, **kwargs)
363
+ [default5]:[rank5]: frame #58: _PyEval_EvalFrameDefault + 0x13ca (0x55cf8972c8fa in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
364
+ [default5]:[rank5]: frame #59: <unknown function> + 0x150582 (0x55cf89747582 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
365
+ [default5]:[rank5]: frame #60: PyObject_Call + 0xbc (0x55cf89747f1c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
366
+ [default5]:[rank5]: frame #61: _PyEval_EvalFrameDefault + 0x2d83 (0x55cf8972e2b3 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
367
+ [default5]:[rank5]: frame #62: <unknown function> + 0x150582 (0x55cf89747582 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
368
+ [default5]:[rank5]: frame #63: PyObject_Call + 0xbc (0x55cf89747f1c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
369
+ [default5]:[rank5]: . This may indicate a possible application crash on rank 0 or a network set up issue.
370
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
371
+ [default6]:[rank6]: return forward_call(*args, **kwargs)
372
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 126, in forward
373
+ [default6]:[rank6]: new_kwargs[name] = recv_from_pipeline_state_buffer(
374
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/functional.py", line 117, in recv_from_pipeline_state_buffer
375
+ [default6]:[rank6]: pipeline_state.run_communication()
376
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/state.py", line 150, in run_communication
377
+ [default6]:[rank6]: recv_activation_tensor = recv_activation()
378
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/state.py", line 31, in __call__
379
+ [default6]:[rank6]: return self.p2p.recv_tensors(num_tensors=1, from_rank=self.from_rank)[0]
380
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/p2p.py", line 353, in recv_tensors
381
+ [default6]:[rank6]: buffers, futures = self.irecv_tensors(num_tensors=num_tensors, from_rank=from_rank, tag=tag)
382
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/p2p.py", line 326, in irecv_tensors
383
+ [default6]:[rank6]: meta = self._recv_meta(from_rank=from_rank, tag=tag)
384
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/p2p.py", line 246, in _recv_meta
385
+ [default6]:[rank6]: dist.recv(
386
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/c10d_logger.py", line 75, in wrapper
387
+ [default6]:[rank6]: return func(*args, **kwargs)
388
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py", line 1932, in recv
389
+ [default6]:[rank6]: pg.recv([tensor], group_src_rank, tag).wait()
390
+ [default6]:[rank6]: torch.distributed.DistBackendError: [6] is setting up NCCL communicator and retrieving ncclUniqueId from [0] via c10d key-value store by key '5:6', but store->get('5:6') got error: Connection reset by peer
391
+ [default6]:[rank6]: Exception raised from recvBytes at ../torch/csrc/distributed/c10d/Utils.hpp:672 (most recent call first):
392
+ [default6]:[rank6]: frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7f641363f897 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libc10.so)
393
+ [default6]:[rank6]: frame #1: <unknown function> + 0x5b3a23e (0x7f644d15c23e in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
394
+ [default6]:[rank6]: frame #2: c10d::TCPStore::doWait(c10::ArrayRef<std::string>, std::chrono::duration<long, std::ratio<1l, 1000l> >) + 0x2c7 (0x7f644d156c87 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
395
+ [default6]:[rank6]: frame #3: c10d::TCPStore::doGet(std::string const&) + 0x32 (0x7f644d156f82 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
396
+ [default6]:[rank6]: frame #4: c10d::TCPStore::get(std::string const&) + 0xa1 (0x7f644d157fd1 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
397
+ [default6]:[rank6]: frame #5: c10d::PrefixStore::get(std::string const&) + 0x31 (0x7f644d10c371 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
398
+ [default6]:[rank6]: frame #6: c10d::PrefixStore::get(std::string const&) + 0x31 (0x7f644d10c371 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
399
+ [default6]:[rank6]: frame #7: c10d::PrefixStore::get(std::string const&) + 0x31 (0x7f644d10c371 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
400
+ [default6]:[rank6]: frame #8: c10d::PrefixStore::get(std::string const&) + 0x31 (0x7f644d10c371 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
401
+ [default6]:[rank6]: frame #9: c10d::ProcessGroupNCCL::broadcastUniqueNCCLID(ncclUniqueId*, bool, std::string const&, int) + 0xa9 (0x7f6414919189 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
402
+ [default6]:[rank6]: frame #10: c10d::ProcessGroupNCCL::getNCCLComm(std::string const&, c10::Device&, c10d::OpType, int, bool) + 0xc50 (0x7f6414920610 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
403
+ [default6]:[rank6]: frame #11: c10d::ProcessGroupNCCL::recv(std::vector<at::Tensor, std::allocator<at::Tensor> >&, int, int) + 0x5f8 (0x7f641493f978 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
404
+ [default6]:[rank6]: frame #12: <unknown function> + 0x5adc309 (0x7f644d0fe309 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
405
+ [default6]:[rank6]: frame #13: <unknown function> + 0x5ae6f10 (0x7f644d108f10 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
406
+ [default6]:[rank6]: frame #14: <unknown function> + 0x5ae6fa5 (0x7f644d108fa5 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
407
+ [default6]:[rank6]: frame #15: <unknown function> + 0x5124446 (0x7f644c746446 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
408
+ [default6]:[rank6]: frame #16: <unknown function> + 0x1acf4b8 (0x7f64490f14b8 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
409
+ [default6]:[rank6]: frame #17: <unknown function> + 0x5aee004 (0x7f644d110004 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
410
+ [default6]:[rank6]: frame #18: <unknown function> + 0x5af36b5 (0x7f644d1156b5 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
411
+ [default6]:[rank6]: frame #19: <unknown function> + 0xd2631e (0x7f645fcff31e in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_python.so)
412
+ [default6]:[rank6]: frame #20: <unknown function> + 0x47def4 (0x7f645f456ef4 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_python.so)
413
+ [default6]:[rank6]: frame #21: <unknown function> + 0x1445a6 (0x55b70a1955a6 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
414
+ [default6]:[rank6]: frame #22: _PyObject_MakeTpCall + 0x26b (0x55b70a18ea6b in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
415
+ [default6]:[rank6]: frame #23: <unknown function> + 0x150866 (0x55b70a1a1866 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
416
+ [default6]:[rank6]: frame #24: _PyEval_EvalFrameDefault + 0x4c12 (0x55b70a18a142 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
417
+ [default6]:[rank6]: frame #25: _PyFunction_Vectorcall + 0x6c (0x55b70a195a2c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
418
+ [default6]:[rank6]: frame #26: PyObject_Call + 0xbc (0x55b70a1a1f1c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
419
+ [default6]:[rank6]: frame #27: _PyEval_EvalFrameDefault + 0x2d83 (0x55b70a1882b3 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
420
+ [default6]:[rank6]: frame #28: _PyFunction_Vectorcall + 0x6c (0x55b70a195a2c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
421
+ [default6]:[rank6]: frame #29: _PyEval_EvalFrameDefault + 0x13ca (0x55b70a1868fa in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
422
+ [default6]:[rank6]: frame #30: <unknown function> + 0x150582 (0x55b70a1a1582 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
423
+ [default6]:[rank6]: frame #31: _PyEval_EvalFrameDefault + 0x13ca (0x55b70a1868fa in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
424
+ [default6]:[rank6]: frame #32: <unknown function> + 0x150582 (0x55b70a1a1582 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
425
+ [default6]:[rank6]: frame #33: _PyEval_EvalFrameDefault + 0x13ca (0x55b70a1868fa in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
426
+ [default6]:[rank6]: frame #34: <unknown function> + 0x150582 (0x55b70a1a1582 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
427
+ [default6]:[rank6]: frame #35: _PyEval_EvalFrameDefault + 0x13ca (0x55b70a1868fa in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
428
+ [default6]:[rank6]: frame #36: _PyObject_FastCallDictTstate + 0xd0 (0x55b70a18df50 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
429
+ [default6]:[rank6]: frame #37: _PyObject_Call_Prepend + 0x69 (0x55b70a19fc39 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
430
+ [default6]:[rank6]: frame #38: <unknown function> + 0x211239 (0x55b70a262239 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
431
+ [default6]:[rank6]: frame #39: _PyObject_MakeTpCall + 0x26b (0x55b70a18ea6b in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
432
+ [default6]:[rank6]: frame #40: _PyEval_EvalFrameDefault + 0x4eb6 (0x55b70a18a3e6 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
433
+ [default6]:[rank6]: frame #41: _PyFunction_Vectorcall + 0x6c (0x55b70a195a2c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
434
+ [default6]:[rank6]: frame #42: _PyEval_EvalFrameDefault + 0x72c (0x55b70a185c5c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
435
+ [default6]:[rank6]: frame #43: _PyFunction_Vectorcall + 0x6c (0x55b70a195a2c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
436
+ [default6]:[rank6]: frame #44: _PyEval_EvalFrameDefault + 0x13ca (0x55b70a1868fa in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
437
+ [default6]:[rank6]: frame #45: <unknown function> + 0x150582 (0x55b70a1a1582 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
438
+ [default6]:[rank6]: frame #46: PyObject_Call + 0xbc (0x55b70a1a1f1c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
439
+ [default6]:[rank6]: frame #47: _PyEval_EvalFrameDefault + 0x2d83 (0x55b70a1882b3 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
440
+ [default6]:[rank6]: frame #48: <unknown function> + 0x150582 (0x55b70a1a1582 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
441
+ [default6]:[rank6]: frame #49: PyObject_Call + 0xbc (0x55b70a1a1f1c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
442
+ [default6]:[rank6]: frame #50: _PyEval_EvalFrameDefault + 0x2d83 (0x55b70a1882b3 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
443
+ [default6]:[rank6]: frame #51: _PyFunction_Vectorcall + 0x6c (0x55b70a195a2c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
444
+ [default6]:[rank6]: frame #52: _PyObject_FastCallDictTstate + 0x187 (0x55b70a18e007 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
445
+ [default6]:[rank6]: frame #53: _PyObject_Call_Prepend + 0x69 (0x55b70a19fc39 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
446
+ [default6]:[rank6]: frame #54: <unknown function> + 0x211239 (0x55b70a262239 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
447
+ [default6]:[rank6]: frame #55: PyObject_Call + 0x207 (0x55b70a1a2067 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
448
+ [default6]:[rank6]: frame #56: _PyEval_EvalFrameDefault + 0x2d83 (0x55b70a1882b3 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
449
+ [default6]:[rank6]: frame #57: <unknown function> + 0x150582 (0x55b70a1a1582 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
450
+ [default6]:[rank6]: frame #58: _PyEval_EvalFrameDefault + 0x13ca (0x55b70a1868fa in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
451
+ [default6]:[rank6]: frame #59: <unknown function> + 0x150582 (0x55b70a1a1582 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
452
+ [default6]:[rank6]: frame #60: PyObject_Call + 0xbc (0x55b70a1a1f1c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
453
+ [default6]:[rank6]: frame #61: _PyEval_EvalFrameDefault + 0x2d83 (0x55b70a1882b3 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
454
+ [default6]:[rank6]: frame #62: <unknown function> + 0x150582 (0x55b70a1a1582 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
455
+ [default6]:[rank6]: frame #63: PyObject_Call + 0xbc (0x55b70a1a1f1c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
456
+ [default6]:[rank6]: . This may indicate a possible application crash on rank 0 or a network set up issue.
457
+ [default7]:[rank7]: Traceback (most recent call last):
458
+ [default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
459
+ [default7]:[rank7]: trainer.train(dataloader)
460
+ [default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
461
+ [default7]:[rank7]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
462
+ [default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
463
+ [default7]:[rank7]: outputs = self.pipeline_engine.train_batch_iter(
464
+ [default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
465
+ [default7]:[rank7]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
466
+ [default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
467
+ [default7]:[rank7]: output = model(**micro_batch)
468
+ [default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
469
+ [default7]:[rank7]: return self._call_impl(*args, **kwargs)
470
+ [default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
471
+ [default7]:[rank7]: return forward_call(*args, **kwargs)
472
+ [default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
473
+ [default7]:[rank7]: sharded_logits = self.model(
474
+ [default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
475
+ [default7]:[rank7]: return self._call_impl(*args, **kwargs)
476
+ [default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
477
+ [default7]:[rank7]: return forward_call(*args, **kwargs)
478
+ [default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
479
+ [default7]:[rank7]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
480
+ [default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 782, in forward_with_hidden_states
481
+ [default7]:[rank7]: hidden_states = self.final_layer_norm(input=hidden_encoder_states["hidden_states"])["hidden_states"]
482
+ [default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
483
+ [default7]:[rank7]: return self._call_impl(*args, **kwargs)
484
+ [default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
485
+ [default7]:[rank7]: return forward_call(*args, **kwargs)
486
+ [default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 126, in forward
487
+ [default7]:[rank7]: new_kwargs[name] = recv_from_pipeline_state_buffer(
488
+ [default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/functional.py", line 117, in recv_from_pipeline_state_buffer
489
+ [default7]:[rank7]: pipeline_state.run_communication()
490
+ [default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/state.py", line 150, in run_communication
491
+ [default7]:[rank7]: recv_activation_tensor = recv_activation()
492
+ [default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/state.py", line 31, in __call__
493
+ [default7]:[rank7]: return self.p2p.recv_tensors(num_tensors=1, from_rank=self.from_rank)[0]
494
+ [default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/p2p.py", line 353, in recv_tensors
495
+ [default7]:[rank7]: buffers, futures = self.irecv_tensors(num_tensors=num_tensors, from_rank=from_rank, tag=tag)
496
+ [default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/p2p.py", line 326, in irecv_tensors
497
+ [default7]:[rank7]: meta = self._recv_meta(from_rank=from_rank, tag=tag)
498
+ [default7]:[rank7]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/p2p.py", line 246, in _recv_meta
499
+ [default7]:[rank7]: dist.recv(
500
+ [default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/c10d_logger.py", line 75, in wrapper
501
+ [default7]:[rank7]: return func(*args, **kwargs)
502
+ [default7]:[rank7]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py", line 1932, in recv
503
+ [default7]:[rank7]: pg.recv([tensor], group_src_rank, tag).wait()
504
+ [default7]:[rank7]: torch.distributed.DistBackendError: [7] is setting up NCCL communicator and retrieving ncclUniqueId from [0] via c10d key-value store by key '6:7', but store->get('6:7') got error: Connection reset by peer
505
+ [default7]:[rank7]: Exception raised from recvBytes at ../torch/csrc/distributed/c10d/Utils.hpp:672 (most recent call first):
506
+ [default7]:[rank7]: frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7f333cc8d897 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libc10.so)
507
+ [default7]:[rank7]: frame #1: <unknown function> + 0x5b3a23e (0x7f33767aa23e in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
508
+ [default7]:[rank7]: frame #2: c10d::TCPStore::doWait(c10::ArrayRef<std::string>, std::chrono::duration<long, std::ratio<1l, 1000l> >) + 0x2c7 (0x7f33767a4c87 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
509
+ [default7]:[rank7]: frame #3: c10d::TCPStore::doGet(std::string const&) + 0x32 (0x7f33767a4f82 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
510
+ [default7]:[rank7]: frame #4: c10d::TCPStore::get(std::string const&) + 0xa1 (0x7f33767a5fd1 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
511
+ [default7]:[rank7]: frame #5: c10d::PrefixStore::get(std::string const&) + 0x31 (0x7f337675a371 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
512
+ [default7]:[rank7]: frame #6: c10d::PrefixStore::get(std::string const&) + 0x31 (0x7f337675a371 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
513
+ [default7]:[rank7]: frame #7: c10d::PrefixStore::get(std::string const&) + 0x31 (0x7f337675a371 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
514
+ [default7]:[rank7]: frame #8: c10d::PrefixStore::get(std::string const&) + 0x31 (0x7f337675a371 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
515
+ [default7]:[rank7]: frame #9: c10d::ProcessGroupNCCL::broadcastUniqueNCCLID(ncclUniqueId*, bool, std::string const&, int) + 0xa9 (0x7f333df67189 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
516
+ [default7]:[rank7]: frame #10: c10d::ProcessGroupNCCL::getNCCLComm(std::string const&, c10::Device&, c10d::OpType, int, bool) + 0xc50 (0x7f333df6e610 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
517
+ [default7]:[rank7]: frame #11: c10d::ProcessGroupNCCL::recv(std::vector<at::Tensor, std::allocator<at::Tensor> >&, int, int) + 0x5f8 (0x7f333df8d978 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
518
+ [default7]:[rank7]: frame #12: <unknown function> + 0x5adc309 (0x7f337674c309 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
519
+ [default7]:[rank7]: frame #13: <unknown function> + 0x5ae6f10 (0x7f3376756f10 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
520
+ [default7]:[rank7]: frame #14: <unknown function> + 0x5ae6fa5 (0x7f3376756fa5 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
521
+ [default7]:[rank7]: frame #15: <unknown function> + 0x5124446 (0x7f3375d94446 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
522
+ [default7]:[rank7]: frame #16: <unknown function> + 0x1acf4b8 (0x7f337273f4b8 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
523
+ [default7]:[rank7]: frame #17: <unknown function> + 0x5aee004 (0x7f337675e004 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
524
+ [default7]:[rank7]: frame #18: <unknown function> + 0x5af36b5 (0x7f33767636b5 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cpu.so)
525
+ [default7]:[rank7]: frame #19: <unknown function> + 0xd2631e (0x7f338934d31e in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_python.so)
526
+ [default7]:[rank7]: frame #20: <unknown function> + 0x47def4 (0x7f3388aa4ef4 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_python.so)
527
+ [default7]:[rank7]: frame #21: <unknown function> + 0x1445a6 (0x56516430e5a6 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
528
+ [default7]:[rank7]: frame #22: _PyObject_MakeTpCall + 0x26b (0x565164307a6b in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
529
+ [default7]:[rank7]: frame #23: <unknown function> + 0x150866 (0x56516431a866 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
530
+ [default7]:[rank7]: frame #24: _PyEval_EvalFrameDefault + 0x4c12 (0x565164303142 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
531
+ [default7]:[rank7]: frame #25: _PyFunction_Vectorcall + 0x6c (0x56516430ea2c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
532
+ [default7]:[rank7]: frame #26: PyObject_Call + 0xbc (0x56516431af1c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
533
+ [default7]:[rank7]: frame #27: _PyEval_EvalFrameDefault + 0x2d83 (0x5651643012b3 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
534
+ [default7]:[rank7]: frame #28: _PyFunction_Vectorcall + 0x6c (0x56516430ea2c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
535
+ [default7]:[rank7]: frame #29: _PyEval_EvalFrameDefault + 0x13ca (0x5651642ff8fa in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
536
+ [default7]:[rank7]: frame #30: <unknown function> + 0x150582 (0x56516431a582 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
537
+ [default7]:[rank7]: frame #31: _PyEval_EvalFrameDefault + 0x13ca (0x5651642ff8fa in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
538
+ [default7]:[rank7]: frame #32: <unknown function> + 0x150582 (0x56516431a582 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
539
+ [default7]:[rank7]: frame #33: _PyEval_EvalFrameDefault + 0x13ca (0x5651642ff8fa in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
540
+ [default7]:[rank7]: frame #34: <unknown function> + 0x150582 (0x56516431a582 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
541
+ [default7]:[rank7]: frame #35: _PyEval_EvalFrameDefault + 0x13ca (0x5651642ff8fa in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
542
+ [default7]:[rank7]: frame #36: _PyObject_FastCallDictTstate + 0xd0 (0x565164306f50 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
543
+ [default7]:[rank7]: frame #37: _PyObject_Call_Prepend + 0x69 (0x565164318c39 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
544
+ [default7]:[rank7]: frame #38: <unknown function> + 0x211239 (0x5651643db239 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
545
+ [default7]:[rank7]: frame #39: _PyObject_MakeTpCall + 0x26b (0x565164307a6b in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
546
+ [default7]:[rank7]: frame #40: _PyEval_EvalFrameDefault + 0x4eb6 (0x5651643033e6 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
547
+ [default7]:[rank7]: frame #41: _PyFunction_Vectorcall + 0x6c (0x56516430ea2c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
548
+ [default7]:[rank7]: frame #42: _PyEval_EvalFrameDefault + 0x72c (0x5651642fec5c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
549
+ [default7]:[rank7]: frame #43: _PyFunction_Vectorcall + 0x6c (0x56516430ea2c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
550
+ [default7]:[rank7]: frame #44: _PyEval_EvalFrameDefault + 0x13ca (0x5651642ff8fa in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
551
+ [default7]:[rank7]: frame #45: <unknown function> + 0x150582 (0x56516431a582 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
552
+ [default7]:[rank7]: frame #46: PyObject_Call + 0xbc (0x56516431af1c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
553
+ [default7]:[rank7]: frame #47: _PyEval_EvalFrameDefault + 0x2d83 (0x5651643012b3 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
554
+ [default7]:[rank7]: frame #48: <unknown function> + 0x150582 (0x56516431a582 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
555
+ [default7]:[rank7]: frame #49: PyObject_Call + 0xbc (0x56516431af1c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
556
+ [default7]:[rank7]: frame #50: _PyEval_EvalFrameDefault + 0x2d83 (0x5651643012b3 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
557
+ [default7]:[rank7]: frame #51: _PyFunction_Vectorcall + 0x6c (0x56516430ea2c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
558
+ [default7]:[rank7]: frame #52: _PyObject_FastCallDictTstate + 0x187 (0x565164307007 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
559
+ [default7]:[rank7]: frame #53: _PyObject_Call_Prepend + 0x69 (0x565164318c39 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
560
+ [default7]:[rank7]: frame #54: <unknown function> + 0x211239 (0x5651643db239 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
561
+ [default7]:[rank7]: frame #55: _PyObject_MakeTpCall + 0x26b (0x565164307a6b in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
562
+ [default7]:[rank7]: frame #56: _PyEval_EvalFrameDefault + 0x5723 (0x565164303c53 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
563
+ [default7]:[rank7]: frame #57: <unknown function> + 0x150582 (0x56516431a582 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
564
+ [default7]:[rank7]: frame #58: _PyEval_EvalFrameDefault + 0x13ca (0x5651642ff8fa in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
565
+ [default7]:[rank7]: frame #59: <unknown function> + 0x150582 (0x56516431a582 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
566
+ [default7]:[rank7]: frame #60: PyObject_Call + 0xbc (0x56516431af1c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
567
+ [default7]:[rank7]: frame #61: _PyEval_EvalFrameDefault + 0x2d83 (0x5651643012b3 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
568
+ [default7]:[rank7]: frame #62: <unknown function> + 0x150582 (0x56516431a582 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
569
+ [default7]:[rank7]: frame #63: PyObject_Call + 0xbc (0x56516431af1c in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10)
570
+ [default7]:[rank7]: . This may indicate a possible application crash on rank 0 or a network set up issue.
571
+ W0703 23:10:49.532000 140032670197568 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 350538 closing signal SIGTERM
572
+ W0703 23:10:49.533000 140032670197568 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 350539 closing signal SIGTERM
573
+ W0703 23:10:49.533000 140032670197568 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 350540 closing signal SIGTERM
574
+ W0703 23:10:49.533000 140032670197568 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 350541 closing signal SIGTERM
575
+ E0703 23:10:50.966000 140032670197568 torch/distributed/elastic/multiprocessing/api.py:826] failed (exitcode: 1) local_rank: 0 (pid: 350537) of binary: /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10
576
+ Traceback (most recent call last):
577
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/torchrun", line 8, in <module>
578
+ sys.exit(main())
579
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 347, in wrapper
580
+ return f(*args, **kwargs)
581
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/run.py", line 879, in main
582
+ run(args)
583
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/run.py", line 870, in run
584
+ elastic_launch(
585
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 132, in __call__
586
+ return launch_agent(self._config, self._entrypoint, list(args))
587
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 263, in launch_agent
588
+ raise ChildFailedError(
589
+ torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
590
+ ============================================================
591
+ /fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py FAILED
592
+ ------------------------------------------------------------
593
+ Failures:
594
+ [1]:
595
+ time : 2024-07-03_23:10:49
596
+ host : ip-26-0-160-225.ec2.internal
597
+ rank : 5 (local_rank: 5)
598
+ exitcode : 1 (pid: 350542)
599
+ error_file: <N/A>
600
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
601
+ [2]:
602
+ time : 2024-07-03_23:10:49
603
+ host : ip-26-0-160-225.ec2.internal
604
+ rank : 6 (local_rank: 6)
605
+ exitcode : 1 (pid: 350543)
606
+ error_file: <N/A>
607
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
608
+ [3]:
609
+ time : 2024-07-03_23:10:49
610
+ host : ip-26-0-160-225.ec2.internal
611
+ rank : 7 (local_rank: 7)
612
+ exitcode : 1 (pid: 350544)
613
+ error_file: <N/A>
614
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
615
+ ------------------------------------------------------------
616
+ Root Cause (first observed failure):
617
+ [0]:
618
+ time : 2024-07-03_23:10:49
619
+ host : ip-26-0-160-225.ec2.internal
620
+ rank : 0 (local_rank: 0)
621
+ exitcode : 1 (pid: 350537)
622
+ error_file: <N/A>
623
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
624
+ ============================================================
625
+ srun: error: ip-26-0-160-225: task 0: Exited with exit code 1
626
+ Consider using `hf_transfer` for faster uploads. This solution comes with some limitations. See https://huggingface.co/docs/huggingface_hub/hf_transfer for more details.
llama-1B/8_GPUS/dp-1_tp-1_pp-8_mbz-16/status.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ oom