3outeille committed
Commit 98a5112
Parent: a79d17f

Upload llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-128

llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-128/bench.slurm ADDED
@@ -0,0 +1,111 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_cluster
+ #SBATCH --time=02:00:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --nodes=1
+ #SBATCH --gres=gpu:8
+ #SBATCH --qos=normal
+ #SBATCH --ntasks-per-node=1
+ #SBATCH --cpus-per-task=96
+ #SBATCH --exclusive
+ #SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-128/log.out
+ #SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-128/log.out
+
+ # Function to update the status file based on squeue output
+ update_status() {
+     job_id=$1
+     status_file=$2
+     # For unknown reasons, this doesn't update the status for pending jobs; it only works for running ones.
+     while true; do
+         job_status=$(squeue --job $job_id --noheader --format=%T)
+         echo "Job status: $job_status"
+         if [ -z "$job_status" ]; then
+             # Job has finished or is not found
+             break
+         elif [ "$job_status" = "RUNNING" ]; then
+             printf "running" > $status_file
+             break
+         fi
+         sleep 10
+     done
+ }
+
+ # Misc initializations.
+ echo "========================"
+ echo "START TIME: $(date)"
+ source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
+ conda activate /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster
+ echo "python3 version = $(python3 --version)"
+ echo "========================"
+
+ # Slurm stuff
+ export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
+ export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
+ export MASTER_PORT=$((1024 + RANDOM % 64511))
+
+ export TMPDIR=/scratch
+ export HF_DATASETS_CACHE="/admin/home/ferdinand_mom/.cache"
+ export CUBLAS_WORKSPACE_CONFIG=":4096:8"
+ export CUDA_DEVICE_MAX_CONNECTIONS="1"
+
+ huggingface-cli login --token $HUGGINGFACE_TOKEN
+
+ NANOTRON_REPO="/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron"
+ CMD="$NANOTRON_REPO/run_train.py --config-file /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-128/config.yaml"
+
+ LAUNCHER="torchrun \
+     --nproc_per_node 8 \
+     --nnodes 1 \
+     --rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} \
+     --rdzv_backend c10d \
+     --max_restarts 0 \
+     --tee 3 \
+     --node_rank ${SLURM_PROCID}"
+
+ # Check out the bench_cluster branch
+ cd $NANOTRON_REPO
+ git checkout bench_cluster
+ cd ..
+ # Get the current job ID
+ job_id=${SLURM_JOB_ID}
+
+ # Update the status to "pending" or "running" in the background
+ update_status $job_id /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-128/status.txt &
+
+ # Run the main command
+ srun -u $LAUNCHER $CMD
+ exit_status=$?
+
+ # Update the status based on the exit status of `srun`
+ if [ $exit_status -eq 0 ]; then
+     printf "completed" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-128/status.txt
+ else
+     if grep -q "OutOfMemoryError" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-128/log.out; then
+         printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-128/status.txt
+     elif grep -q " CUDA error: an illegal memory access" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-128/log.out; then
+         printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-128/status.txt
+     elif grep -q "Timeout at NCCL" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-128/log.out; then
+         printf "timeout" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-128/status.txt
+     else
+         printf "fail" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-128/status.txt
+     fi
+ fi
+
+ # Run the report script if the job completed successfully
+ if [ $exit_status -eq 0 ]; then
+     python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-128 --is_logs
+     python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-128 --is_profiler
+ fi
+
+ # Push the folder to the Hub using huggingface-cli
+ huggingface-cli upload nanotron/bench_cluster /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-128 llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-128 --commit-message "Upload llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-128"
+
+ # Verify the upload
+ if [ $? -eq 0 ]; then
+     echo "Uploading to Huggingface Hub successful"
+ else
+     echo "Failed to upload to Huggingface Hub"
+ fi
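
Note on update_status above: the in-script comment flags that the PENDING state is never written. A possible workaround, sketched here as a hypothetical helper (same squeue flags the script already uses, but untested), is to record whatever state squeue reports, lowercased, rather than matching only RUNNING:

track_status() {
    job_id=$1
    status_file=$2
    while true; do
        job_status=$(squeue --job "$job_id" --noheader --format=%T)
        # Empty output means the job has finished or is no longer known to squeue.
        if [ -z "$job_status" ]; then
            break
        fi
        # Write the current state verbatim: PENDING -> "pending", RUNNING -> "running", ...
        printf "%s" "$job_status" | tr '[:upper:]' '[:lower:]' > "$status_file"
        if [ "$job_status" = "RUNNING" ]; then
            break
        fi
        sleep 10
    done
}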
llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-128/config.yaml ADDED
@@ -0,0 +1,90 @@
+ general:
+   project: bench_cluster
+   seed: 42
+ model:
+   ddp_bucket_cap_mb: 25
+   dtype: bfloat16
+   init_method:
+     std: 0.025
+   make_vocab_size_divisible_by: 1
+   model_config:
+     bos_token_id: 1
+     eos_token_id: 2
+     hidden_act: silu
+     hidden_size: 2048
+     initializer_range: 0.02
+     intermediate_size: 4096
+     is_llama_config: true
+     max_position_embeddings: 4096
+     num_attention_heads: 32
+     num_hidden_layers: 24
+     num_key_value_heads: 32
+     pad_token_id: null
+     pretraining_tp: 1
+     rms_norm_eps: 1.0e-05
+     rope_scaling: null
+     rope_theta: 10000.0
+     tie_word_embeddings: true
+     use_cache: true
+     vocab_size: 50257
+ optimizer:
+   accumulate_grad_in_fp32: true
+   clip_grad: 1.0
+   learning_rate_scheduler:
+     learning_rate: 0.0001
+     lr_decay_style: linear
+     lr_warmup_style: linear
+     lr_warmup_steps: 1
+     min_decay_lr: 1.0e-05
+   optimizer_factory:
+     adam_beta1: 0.9
+     adam_beta2: 0.95
+     adam_eps: 1.0e-08
+     name: adamW
+     torch_adam_is_fused: true
+   weight_decay: 0.01
+   zero_stage: 1
+ parallelism:
+   dp: 8
+   expert_parallel_size: 1
+   pp: 1
+   pp_engine: 1f1b
+   tp: 1
+   tp_linear_async_communication: false
+   tp_mode: REDUCE_SCATTER
+ profiler:
+   profiler_export_path: /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-128
+ tokenizer:
+   tokenizer_max_length: null
+   tokenizer_name_or_path: openai-community/gpt2
+   tokenizer_revision: null
+ data_stages:
+ - name: Training Stage
+   start_training_step: 1
+   data:
+     dataset:
+       dataset_overwrite_cache: false
+       dataset_processing_num_proc_per_process: 64
+       hf_dataset_config_name: null
+       hf_dataset_or_datasets: roneneldan/TinyStories
+       hf_dataset_splits: train
+       text_column_name: text
+     num_loading_workers: 0
+     seed: 42
+ lighteval: null
+ tokens:
+   train_steps: 20
+   val_check_interval: -1
+   batch_accumulation_per_replica: 1
+   limit_test_batches: 0
+   limit_val_batches: 0
+   micro_batch_size: 128
+   sequence_length: 4096
+ logging:
+   iteration_step_info_interval: 1
+   log_level: info
+   log_level_replica: info
+ checkpoints:
+   checkpoint_interval: 100000
+   checkpoints_path: /dev/null
+   resume_checkpoint_path: null
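
For reference, the parallelism and tokens sections above pin down the effective batch size; a quick shell-arithmetic sanity check (values copied from this config; the product matches the global_batch_size reported in log.out below):

# dp * micro_batch_size * batch_accumulation_per_replica
echo $(( 8 * 128 * 1 ))         # 1024 sequences per step
# sequences per step * sequence_length
echo $(( 8 * 128 * 1 * 4096 ))  # 4194304 tokens per step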
llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-128/log.out ADDED
@@ -0,0 +1,531 @@
+ ========================
+ START TIME: Wed Jul 3 20:50:21 UTC 2024
+ python3 version = Python 3.10.14
+ ========================
+ The token has not been saved to the git credentials helper. Pass `add_to_git_credential=True` in this function directly or `--add-to-git-credential` if using via `huggingface-cli` if you want to set the git credential as well.
+ Token is valid (permission: write).
+ Your token has been saved to /admin/home/ferdinand_mom/.cache/huggingface/token
+ Login successful
+ Already on 'bench_cluster'
+ M examples/config_tiny_llama.py
+ M examples/config_tiny_llama.yaml
+ M examples/train_tiny_llama.sh
+ M src/nanotron/models/llama.py
+ M src/nanotron/trainer.py
+ Your branch is up to date with 'origin/bench_cluster'.
+ Job status: RUNNING
+ W0703 20:50:29.341000 140462812669760 torch/distributed/run.py:757]
+ W0703 20:50:29.341000 140462812669760 torch/distributed/run.py:757] *****************************************
+ W0703 20:50:29.341000 140462812669760 torch/distributed/run.py:757] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
+ W0703 20:50:29.341000 140462812669760 torch/distributed/run.py:757] *****************************************
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Config:
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Config(general=GeneralArgs(project='bench_cluster',
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: run='%date_%jobid',
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: seed=42,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: step=None,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: consumed_train_samples=None,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: benchmark_csv_path=None,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: ignore_sanity_checks=True),
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: parallelism=ParallelismArgs(dp=8,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: pp=1,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: tp=1,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: pp_engine=<nanotron.parallel.pipeline_parallel.engine.OneForwardOneBackwardPipelineEngine object at 0x7ff7a0888820>,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: tp_mode=<TensorParallelLinearMode.REDUCE_SCATTER: 2>,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: tp_linear_async_communication=False,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: expert_parallel_size=1),
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: model=ModelArgs(model_config=LlamaConfig(bos_token_id=1,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: eos_token_id=2,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: hidden_act='silu',
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: hidden_size=2048,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: initializer_range=0.02,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: intermediate_size=4096,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: is_llama_config=True,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: max_position_embeddings=4096,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: num_attention_heads=32,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: num_hidden_layers=24,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: num_key_value_heads=32,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: pad_token_id=None,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: pretraining_tp=1,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: rms_norm_eps=1e-05,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: rope_scaling=None,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: rope_theta=10000.0,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: tie_word_embeddings=True,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: use_cache=True,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: vocab_size=50257),
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: init_method=RandomInit(std=0.025),
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: dtype=torch.bfloat16,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: make_vocab_size_divisible_by=1,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: ddp_bucket_cap_mb=25),
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: tokenizer=TokenizerArgs(tokenizer_name_or_path='openai-community/gpt2',
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: tokenizer_revision=None,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: tokenizer_max_length=None),
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: checkpoints=CheckpointsArgs(checkpoints_path=Path('/dev/null'),
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: checkpoint_interval=100000,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: save_initial_state=False,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: resume_checkpoint_path=None,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: checkpoints_path_is_shared_file_system=False),
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: logging=LoggingArgs(log_level='info',
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: log_level_replica='info',
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: iteration_step_info_interval=1),
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: tokens=TokensArgs(sequence_length=4096,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: train_steps=20,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: micro_batch_size=128,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: batch_accumulation_per_replica=1,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: val_check_interval=-1,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: limit_val_batches=0,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: limit_test_batches=0),
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: optimizer=OptimizerArgs(optimizer_factory=AdamWOptimizerArgs(adam_eps=1e-08,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: adam_beta1=0.9,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: adam_beta2=0.95,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: torch_adam_is_fused=True,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: name='adamW'),
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: zero_stage=1,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: weight_decay=0.01,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: clip_grad=1.0,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: accumulate_grad_in_fp32=True,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: learning_rate_scheduler=LRSchedulerArgs(learning_rate=0.0001,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: lr_warmup_steps=1,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: lr_warmup_style='linear',
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: lr_decay_style='linear',
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: lr_decay_steps=19,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: lr_decay_starting_step=None,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: min_decay_lr=1e-05)),
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: data_stages=[DatasetStageArgs(name='Training Stage',
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: start_training_step=1,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: data=DataArgs(dataset=PretrainDatasetsArgs(hf_dataset_or_datasets='roneneldan/TinyStories',
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: hf_dataset_splits='train',
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: hf_dataset_config_name=None,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: dataset_processing_num_proc_per_process=64,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: dataset_overwrite_cache=False,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: text_column_name='text'),
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: seed=42,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: num_loading_workers=0))],
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: profiler=ProfilerArgs(profiler_export_path=Path('/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-128')),
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: lighteval=None)
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Model Config:
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: LlamaConfig(bos_token_id=1,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: eos_token_id=2,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: hidden_act='silu',
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: hidden_size=2048,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: initializer_range=0.02,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: intermediate_size=4096,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: is_llama_config=True,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: max_position_embeddings=4096,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: num_attention_heads=32,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: num_hidden_layers=24,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: num_key_value_heads=32,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: pad_token_id=None,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: pretraining_tp=1,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: rms_norm_eps=1e-05,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: rope_scaling=None,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: rope_theta=10000.0,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: tie_word_embeddings=True,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: use_cache=True,
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: vocab_size=50257)
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Building model..
+ [default0]:07/03/2024 20:50:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Setting PP block ranks...
+ [default2]:07/03/2024 20:50:59 [INFO|DP=2|PP=0|TP=0|ip-26-0-174-36]: No checkpoint path provided.
+ [default1]:07/03/2024 20:50:59 [INFO|DP=1|PP=0|TP=0|ip-26-0-174-36]: No checkpoint path provided.
+ [default0]:07/03/2024 20:50:59 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Total number of parameters: 1.11G (2116.51MiB)
+ [default0]:07/03/2024 20:50:59 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Local number of parameters: 1.11G (2116.51MiB)
+ [default0]:07/03/2024 20:50:59 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: [After model building] Memory usage: 2140.53MiB. Peak allocated: 2338.88MiB Peak reserved: 2392.00MiB
+ [default0]:07/03/2024 20:50:59 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: No checkpoint path provided.
+ [default0]:07/03/2024 20:50:59 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Parametrizing model parameters using StandardParametrizator
+ [default3]:07/03/2024 20:50:59 [INFO|DP=3|PP=0|TP=0|ip-26-0-174-36]: No checkpoint path provided.
+ [default4]:07/03/2024 20:50:59 [INFO|DP=4|PP=0|TP=0|ip-26-0-174-36]: No checkpoint path provided.
+ [default7]:07/03/2024 20:50:59 [INFO|DP=7|PP=0|TP=0|ip-26-0-174-36]: No checkpoint path provided.
+ [default6]:07/03/2024 20:50:59 [INFO|DP=6|PP=0|TP=0|ip-26-0-174-36]: No checkpoint path provided.
+ [default5]:07/03/2024 20:50:59 [INFO|DP=5|PP=0|TP=0|ip-26-0-174-36]: No checkpoint path provided.
+ [default0]:07/03/2024 20:51:06 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: [Optimizer Building] Using LearningRateForSP as learning rate
+ [default0]:07/03/2024 20:51:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: [ZeRO sharding] Size of optimizer params per rank:
+ [default0]:07/03/2024 20:51:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: [ZeRO sharding] DP Rank 0 has 139M out of 1.11G (12.50%) params' optimizer states
+ [default0]:07/03/2024 20:51:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: [ZeRO sharding] DP Rank 1 has 139M out of 1.11G (12.50%) params' optimizer states
+ [default0]:07/03/2024 20:51:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: [ZeRO sharding] DP Rank 2 has 139M out of 1.11G (12.50%) params' optimizer states
+ [default0]:07/03/2024 20:51:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: [ZeRO sharding] DP Rank 3 has 139M out of 1.11G (12.50%) params' optimizer states
+ [default0]:07/03/2024 20:51:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: [ZeRO sharding] DP Rank 4 has 139M out of 1.11G (12.50%) params' optimizer states
+ [default0]:07/03/2024 20:51:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: [ZeRO sharding] DP Rank 5 has 139M out of 1.11G (12.50%) params' optimizer states
+ [default0]:07/03/2024 20:51:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: [ZeRO sharding] DP Rank 6 has 139M out of 1.11G (12.50%) params' optimizer states
+ [default0]:07/03/2024 20:51:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: [ZeRO sharding] DP Rank 7 has 139M out of 1.11G (12.50%) params' optimizer states
+ [default0]:07/03/2024 20:51:08 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: [Training Plan] Stage Training Stage has 19 remaining training steps and has consumed 0 samples
+ [default0]:07/03/2024 20:51:08 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Using `datasets` library
+ [default0]:07/03/2024 20:51:08 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Loading tokenizer from openai-community/gpt2 and transformers/hf_hub versions ('4.41.2', '0.23.4')
+ [default0]:07/03/2024 20:51:08 [WARNING|DP=0|PP=0|TP=0|ip-26-0-174-36]: Repo card metadata block was not found. Setting CardData to empty.
+ [default0]:Repo card metadata block was not found. Setting CardData to empty.
+ [default0]:07/03/2024 20:51:11 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: [Training Plan] There are 1 training stages
+ [default0]:07/03/2024 20:51:11 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: [Stage Training Stage] start from step 1
+ [default0]:07/03/2024 20:51:11 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]:
+ [default0]:07/03/2024 20:51:11 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: [Start training] datetime: 2024-07-03 20:51:11.242128 | mbs: 128 | grad_accum: 1 | global_batch_size: 1024 | sequence_length: 4096 | train_steps: 20 | start_iteration_step: 0 | consumed_train_samples: 0
+ [default0]:07/03/2024 20:51:11 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Resuming training from stage Training Stage, it has trained for 0 samples and has 19 remaining train steps
+ [default0]:07/03/2024 20:51:11 [INFO|DP=0|PP=0|TP=0|ip-26-0-174-36]: Memory usage: 6904.53MiB. Peak allocated 6904.53MiB. Peak reserved: 7156.00MiB
+ [default3]:Repo card metadata block was not found. Setting CardData to empty.
+ [default6]:Repo card metadata block was not found. Setting CardData to empty.
+ [default1]:Repo card metadata block was not found. Setting CardData to empty.
+ [default2]:Repo card metadata block was not found. Setting CardData to empty.
+ [default7]:Repo card metadata block was not found. Setting CardData to empty.
+ [default1]:07/03/2024 20:51:11 [WARNING|DP=1|PP=0|TP=0|ip-26-0-174-36]: Repo card metadata block was not found. Setting CardData to empty.
+ [default2]:07/03/2024 20:51:11 [WARNING|DP=2|PP=0|TP=0|ip-26-0-174-36]: Repo card metadata block was not found. Setting CardData to empty.
+ [default6]:07/03/2024 20:51:11 [WARNING|DP=6|PP=0|TP=0|ip-26-0-174-36]: Repo card metadata block was not found. Setting CardData to empty.
+ [default7]:07/03/2024 20:51:11 [WARNING|DP=7|PP=0|TP=0|ip-26-0-174-36]: Repo card metadata block was not found. Setting CardData to empty.
+ [default4]:07/03/2024 20:51:11 [WARNING|DP=4|PP=0|TP=0|ip-26-0-174-36]: Repo card metadata block was not found. Setting CardData to empty.
+ [default4]:Repo card metadata block was not found. Setting CardData to empty.
+ [default3]:07/03/2024 20:51:11 [WARNING|DP=3|PP=0|TP=0|ip-26-0-174-36]: Repo card metadata block was not found. Setting CardData to empty.
+ [default5]:07/03/2024 20:51:11 [WARNING|DP=5|PP=0|TP=0|ip-26-0-174-36]: Repo card metadata block was not found. Setting CardData to empty.
+ [default5]:Repo card metadata block was not found. Setting CardData to empty.
+ [default4]:[rank4]: Traceback (most recent call last):
+ [default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
+ [default4]:[rank4]: trainer.train(dataloader)
+ [default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
+ [default4]:[rank4]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
+ [default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
+ [default4]:[rank4]: outputs = self.pipeline_engine.train_batch_iter(
+ [default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
+ [default4]:[rank4]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
+ [default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
+ [default4]:[rank4]: output = model(**micro_batch)
+ [default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default4]:[rank4]: return self._call_impl(*args, **kwargs)
+ [default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default4]:[rank4]: return forward_call(*args, **kwargs)
+ [default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
+ [default4]:[rank4]: sharded_logits = self.model(
+ [default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default4]:[rank4]: return self._call_impl(*args, **kwargs)
+ [default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default4]:[rank4]: return forward_call(*args, **kwargs)
+ [default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
+ [default4]:[rank4]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
+ [default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
+ [default4]:[rank4]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
+ [default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default4]:[rank4]: return self._call_impl(*args, **kwargs)
+ [default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default4]:[rank4]: return forward_call(*args, **kwargs)
+ [default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
+ [default4]:[rank4]: output = self.pp_block(**new_kwargs)
+ [default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default4]:[rank4]: return self._call_impl(*args, **kwargs)
+ [default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default4]:[rank4]: return forward_call(*args, **kwargs)
+ [default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 631, in forward
+ [default4]:[rank4]: output = self.attn(hidden_states=hidden_states, sequence_mask=sequence_mask)
+ [default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default4]:[rank4]: return self._call_impl(*args, **kwargs)
+ [default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default4]:[rank4]: return forward_call(*args, **kwargs)
+ [default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 565, in forward
+ [default4]:[rank4]: key_value_states = key_value_states.permute(1, 2, 0, 3, 4).contiguous()
+ [default4]:[rank4]: torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 4.00 GiB. GPU  has a total capacity of 79.33 GiB of which 557.94 MiB is free. Including non-PyTorch memory, this process has 78.77 GiB memory in use. Of the allocated memory 64.85 GiB is allocated by PyTorch, and 2.18 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+ [default0]:[rank0]: Traceback (most recent call last):
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
+ [default0]:[rank0]: trainer.train(dataloader)
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
+ [default0]:[rank0]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
+ [default0]:[rank0]: outputs = self.pipeline_engine.train_batch_iter(
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
+ [default0]:[rank0]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
+ [default0]:[rank0]: output = model(**micro_batch)
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default0]:[rank0]: return self._call_impl(*args, **kwargs)
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default0]:[rank0]: return forward_call(*args, **kwargs)
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
+ [default0]:[rank0]: sharded_logits = self.model(
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default0]:[rank0]: return self._call_impl(*args, **kwargs)
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default0]:[rank0]: return forward_call(*args, **kwargs)
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
+ [default0]:[rank0]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
+ [default0]:[rank0]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default0]:[rank0]: return self._call_impl(*args, **kwargs)
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default0]:[rank0]: return forward_call(*args, **kwargs)
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
+ [default0]:[rank0]: output = self.pp_block(**new_kwargs)
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default0]:[rank0]: return self._call_impl(*args, **kwargs)
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default0]:[rank0]: return forward_call(*args, **kwargs)
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 631, in forward
+ [default0]:[rank0]: output = self.attn(hidden_states=hidden_states, sequence_mask=sequence_mask)
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default0]:[rank0]: return self._call_impl(*args, **kwargs)
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default0]:[rank0]: return forward_call(*args, **kwargs)
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 565, in forward
+ [default0]:[rank0]: key_value_states = key_value_states.permute(1, 2, 0, 3, 4).contiguous()
+ [default0]:[rank0]: torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 4.00 GiB. GPU
+ [default1]:[rank1]: Traceback (most recent call last):
+ [default1]:[rank1]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
+ [default1]:[rank1]: trainer.train(dataloader)
+ [default1]:[rank1]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
+ [default1]:[rank1]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
+ [default1]:[rank1]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
+ [default1]:[rank1]: outputs = self.pipeline_engine.train_batch_iter(
+ [default1]:[rank1]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
+ [default1]:[rank1]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
+ [default1]:[rank1]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
+ [default1]:[rank1]: output = model(**micro_batch)
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default1]:[rank1]: return self._call_impl(*args, **kwargs)
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default1]:[rank1]: return forward_call(*args, **kwargs)
+ [default1]:[rank1]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
+ [default1]:[rank1]: sharded_logits = self.model(
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default1]:[rank1]: return self._call_impl(*args, **kwargs)
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default1]:[rank1]: return forward_call(*args, **kwargs)
+ [default1]:[rank1]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
+ [default1]:[rank1]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
+ [default1]:[rank1]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
+ [default1]:[rank1]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default1]:[rank1]: return self._call_impl(*args, **kwargs)
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default1]:[rank1]: return forward_call(*args, **kwargs)
+ [default1]:[rank1]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
+ [default1]:[rank1]: output = self.pp_block(**new_kwargs)
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default1]:[rank1]: return self._call_impl(*args, **kwargs)
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default1]:[rank1]: return forward_call(*args, **kwargs)
+ [default1]:[rank1]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 631, in forward
+ [default1]:[rank1]: output = self.attn(hidden_states=hidden_states, sequence_mask=sequence_mask)
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default1]:[rank1]: return self._call_impl(*args, **kwargs)
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default1]:[rank1]: return forward_call(*args, **kwargs)
+ [default1]:[rank1]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 565, in forward
+ [default1]:[rank1]: key_value_states = key_value_states.permute(1, 2, 0, 3, 4).contiguous()
+ [default1]:[rank1]: torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 4.00 GiB. GPU  has a total capacity of 79.33 GiB of which 557.94 MiB is free. Including non-PyTorch memory, this process has 78.77 GiB memory in use. Of the allocated memory 64.85 GiB is allocated by PyTorch, and 2.18 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+ [default3]:[rank3]: Traceback (most recent call last):
+ [default3]:[rank3]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
+ [default3]:[rank3]: trainer.train(dataloader)
+ [default3]:[rank3]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
+ [default3]:[rank3]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
+ [default3]:[rank3]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
+ [default3]:[rank3]: outputs = self.pipeline_engine.train_batch_iter(
+ [default3]:[rank3]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
+ [default3]:[rank3]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
+ [default3]:[rank3]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
+ [default3]:[rank3]: output = model(**micro_batch)
+ [default3]:[rank3]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default3]:[rank3]: return self._call_impl(*args, **kwargs)
+ [default3]:[rank3]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default3]:[rank3]: return forward_call(*args, **kwargs)
+ [default3]:[rank3]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
+ [default3]:[rank3]: sharded_logits = self.model(
+ [default3]:[rank3]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default3]:[rank3]: return self._call_impl(*args, **kwargs)
+ [default3]:[rank3]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default3]:[rank3]: return forward_call(*args, **kwargs)
+ [default3]:[rank3]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
+ [default3]:[rank3]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
+ [default3]:[rank3]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
+ [default3]:[rank3]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
+ [default3]:[rank3]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default3]:[rank3]: return self._call_impl(*args, **kwargs)
+ [default3]:[rank3]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default3]:[rank3]: return forward_call(*args, **kwargs)
+ [default3]:[rank3]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
+ [default3]:[rank3]: output = self.pp_block(**new_kwargs)
+ [default3]:[rank3]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default3]:[rank3]: return self._call_impl(*args, **kwargs)
+ [default3]:[rank3]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default3]:[rank3]: return forward_call(*args, **kwargs)
+ [default3]:[rank3]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 631, in forward
+ [default3]:[rank3]: output = self.attn(hidden_states=hidden_states, sequence_mask=sequence_mask)
+ [default3]:[rank3]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default3]:[rank3]: return self._call_impl(*args, **kwargs)
+ [default3]:[rank3]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default3]:[rank3]: return forward_call(*args, **kwargs)
+ [default3]:[rank3]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 565, in forward
+ [default3]:[rank3]: key_value_states = key_value_states.permute(1, 2, 0, 3, 4).contiguous()
+ [default3]:[rank3]: torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 4.00 GiB. GPU  has a total capacity of 79.33 GiB of which 557.94 MiB is free. Including non-PyTorch memory, this process has 78.77 GiB memory in use. Of the allocated memory 64.85 GiB is allocated by PyTorch, and 2.18 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
+ [default5]:[rank5]: Traceback (most recent call last):
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
+ [default5]:[rank5]: trainer.train(dataloader)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
+ [default5]:[rank5]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
+ [default5]:[rank5]: outputs = self.pipeline_engine.train_batch_iter(
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
+ [default5]:[rank5]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
+ [default5]:[rank5]: output = model(**micro_batch)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default5]:[rank5]: return self._call_impl(*args, **kwargs)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default5]:[rank5]: return forward_call(*args, **kwargs)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
+ [default5]:[rank5]: sharded_logits = self.model(
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default5]:[rank5]: return self._call_impl(*args, **kwargs)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default5]:[rank5]: return forward_call(*args, **kwargs)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
+ [default5]:[rank5]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
+ [default5]:[rank5]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default5]:[rank5]: return self._call_impl(*args, **kwargs)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default5]:[rank5]: return forward_call(*args, **kwargs)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
+ [default5]:[rank5]: output = self.pp_block(**new_kwargs)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default5]:[rank5]: return self._call_impl(*args, **kwargs)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default5]:[rank5]: return forward_call(*args, **kwargs)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 631, in forward
+ [default5]:[rank5]: output = self.attn(hidden_states=hidden_states, sequence_mask=sequence_mask)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default5]:[rank5]: return self._call_impl(*args, **kwargs)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default5]:[rank5]: return forward_call(*args, **kwargs)
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 565, in forward
+ [default5]:[rank5]: key_value_states = key_value_states.permute(1, 2, 0, 3, 4).contiguous()
+ [default5]:[rank5]: torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 4.00 GiB. GPU  has a total capacity of 79.33 GiB of which 557.94 MiB is free. Including non-PyTorch memory, this process has 78.77 GiB memory in use. Of the allocated memory 64.85 GiB is allocated by PyTorch, and 2.18 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
394
+ [default2]:[rank2]: Traceback (most recent call last):
395
+ [default2]:[rank2]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
396
+ [default2]:[rank2]: trainer.train(dataloader)
397
+ [default2]:[rank2]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
398
+ [default2]:[rank2]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
399
+ [default2]:[rank2]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
400
+ [default2]:[rank2]: outputs = self.pipeline_engine.train_batch_iter(
401
+ [default2]:[rank2]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
402
+ [default2]:[rank2]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
403
+ [default2]:[rank2]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
404
+ [default2]:[rank2]: output = model(**micro_batch)
405
+ [default2]:[rank2]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
406
+ [default2]:[rank2]: return self._call_impl(*args, **kwargs)
407
+ [default2]:[rank2]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
408
+ [default2]:[rank2]: return forward_call(*args, **kwargs)
409
+ [default2]:[rank2]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
410
+ [default2]:[rank2]: sharded_logits = self.model(
411
+ [default2]:[rank2]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
412
+ [default2]:[rank2]: return self._call_impl(*args, **kwargs)
413
+ [default2]:[rank2]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
414
+ [default2]:[rank2]: return forward_call(*args, **kwargs)
415
+ [default2]:[rank2]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
416
+ [default2]:[rank2]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
417
+ [default2]:[rank2]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
418
+ [default2]:[rank2]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
419
+ [default2]:[rank2]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
420
+ [default2]:[rank2]: return self._call_impl(*args, **kwargs)
421
+ [default2]:[rank2]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
422
+ [default2]:[rank2]: return forward_call(*args, **kwargs)
423
+ [default2]:[rank2]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
424
+ [default2]:[rank2]: output = self.pp_block(**new_kwargs)
425
+ [default2]:[rank2]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
426
+ [default2]:[rank2]: return self._call_impl(*args, **kwargs)
427
+ [default2]:[rank2]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
428
+ [default2]:[rank2]: return forward_call(*args, **kwargs)
429
+ [default2]:[rank2]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 631, in forward
430
+ [default2]:[rank2]: output = self.attn(hidden_states=hidden_states, sequence_mask=sequence_mask)
431
+ [default2]:[rank2]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
432
+ [default2]:[rank2]: return self._call_impl(*args, **kwargs)
433
+ [default2]:[rank2]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
434
+ [default2]:[rank2]: return forward_call(*args, **kwargs)
435
+ [default2]:[rank2]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 565, in forward
436
+ [default2]:[rank2]: key_value_states = key_value_states.permute(1, 2, 0, 3, 4).contiguous()
437
+ [default2]:[rank2]: torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 4.00 GiB. GPU  has a total capacity of 79.33 GiB of which 557.94 MiB is free. Including non-PyTorch memory, this process has 78.77 GiB memory in use. Of the allocated memory 64.85 GiB is allocated by PyTorch, and 2.18 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
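Editor's note: for context on why every rank dies on this exact line, `permute` only returns a view, so the 4.00 GiB allocation happens when `.contiguous()` materializes the copy. A small runnable sketch; the shape below is illustrative, not the real layout in llama.py:

import torch

device = "cuda" if torch.cuda.is_available() else "cpu"
x = torch.randn(64, 2, 2, 4, 8, device=device)  # illustrative 5-d tensor
y = x.permute(1, 2, 0, 3, 4)  # a view: no new memory is allocated here
print(y.is_contiguous())      # False
z = y.contiguous()            # full copy: this is where the large allocation lands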
438
+ W0703 20:51:19.664000 140462812669760 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 214255 closing signal SIGTERM
439
+ W0703 20:51:19.664000 140462812669760 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 214256 closing signal SIGTERM
440
+ W0703 20:51:19.664000 140462812669760 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 214258 closing signal SIGTERM
441
+ W0703 20:51:19.665000 140462812669760 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 214259 closing signal SIGTERM
442
+ W0703 20:51:19.665000 140462812669760 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 214260 closing signal SIGTERM
443
+ [default6]:[rank6]: Traceback (most recent call last):
444
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
445
+ [default6]:[rank6]: trainer.train(dataloader)
446
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
447
+ [default6]:[rank6]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
448
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
449
+ [default6]:[rank6]: outputs = self.pipeline_engine.train_batch_iter(
450
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
451
+ [default6]:[rank6]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
452
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
453
+ [default6]:[rank6]: output = model(**micro_batch)
454
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
455
+ [default6]:[rank6]: return self._call_impl(*args, **kwargs)
456
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
457
+ [default6]:[rank6]: return forward_call(*args, **kwargs)
458
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
459
+ [default6]:[rank6]: sharded_logits = self.model(
460
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
461
+ [default6]:[rank6]: return self._call_impl(*args, **kwargs)
462
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
463
+ [default6]:[rank6]: return forward_call(*args, **kwargs)
464
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
465
+ [default6]:[rank6]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
466
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
467
+ [default6]:[rank6]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
468
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
469
+ [default6]:[rank6]: return self._call_impl(*args, **kwargs)
470
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
471
+ [default6]:[rank6]: return forward_call(*args, **kwargs)
472
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
473
+ [default6]:[rank6]: output = self.pp_block(**new_kwargs)
474
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
475
+ [default6]:[rank6]: return self._call_impl(*args, **kwargs)
476
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
477
+ [default6]:[rank6]: return forward_call(*args, **kwargs)
478
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 631, in forward
479
+ [default6]:[rank6]: output = self.attn(hidden_states=hidden_states, sequence_mask=sequence_mask)
480
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
481
+ [default6]:[rank6]: return self._call_impl(*args, **kwargs)
482
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
483
+ [default6]:[rank6]: return forward_call(*args, **kwargs)
484
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 565, in forward
485
+ [default6]:[rank6]: key_value_states = key_value_states.permute(1, 2, 0, 3, 4).contiguous()
486
+ [default6]:[rank6]: torch.cuda.OutOfMemoryError: CUDA out of memory. Tried to allocate 4.00 GiB. GPU  has a total capacity of 79.33 GiB of which 557.94 MiB is free. Including non-PyTorch memory, this process has 78.77 GiB memory in use. Of the allocated memory 64.85 GiB is allocated by PyTorch, and 2.18 GiB is reserved by PyTorch but unallocated. If reserved but unallocated memory is large try setting PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True to avoid fragmentation. See documentation for Memory Management (https://pytorch.org/docs/stable/notes/cuda.html#environment-variables)
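Editor's note: every rank fails on the same 4.00 GiB allocation, so this is a configuration-level OOM rather than a straggler; mbz-128 simply does not fit on an 80 GiB device for this run. A hypothetical sketch of resubmitting with a halved micro-batch; the `tokens.micro_batch_size` key is an assumption about the nanotron config layout:

import yaml  # requires PyYAML

CONFIG = "config.yaml"  # the per-run config used by this benchmark
with open(CONFIG) as f:
    cfg = yaml.safe_load(f)

# Halve the micro-batch size, e.g. 128 -> 64, and write the config back.
cfg["tokens"]["micro_batch_size"] //= 2
with open(CONFIG, "w") as f:
    yaml.safe_dump(cfg, f)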
487
+ E0703 20:51:21.188000 140462812669760 torch/distributed/elastic/multiprocessing/api.py:826] failed (exitcode: 1) local_rank: 0 (pid: 214253) of binary: /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10
488
+ Traceback (most recent call last):
489
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/torchrun", line 8, in <module>
490
+ sys.exit(main())
491
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 347, in wrapper
492
+ return f(*args, **kwargs)
493
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/run.py", line 879, in main
494
+ run(args)
495
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/run.py", line 870, in run
496
+ elastic_launch(
497
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 132, in __call__
498
+ return launch_agent(self._config, self._entrypoint, list(args))
499
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 263, in launch_agent
500
+ raise ChildFailedError(
501
+ torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
502
+ ============================================================
503
+ /fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py FAILED
504
+ ------------------------------------------------------------
505
+ Failures:
506
+ [1]:
507
+ time : 2024-07-03_20:51:19
508
+ host : ip-26-0-174-36.ec2.internal
509
+ rank : 1 (local_rank: 1)
510
+ exitcode : 1 (pid: 214254)
511
+ error_file: <N/A>
512
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
513
+ [2]:
514
+ time : 2024-07-03_20:51:19
515
+ host : ip-26-0-174-36.ec2.internal
516
+ rank : 4 (local_rank: 4)
517
+ exitcode : 1 (pid: 214257)
518
+ error_file: <N/A>
519
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
520
+ ------------------------------------------------------------
521
+ Root Cause (first observed failure):
522
+ [0]:
523
+ time : 2024-07-03_20:51:19
524
+ host : ip-26-0-174-36.ec2.internal
525
+ rank : 0 (local_rank: 0)
526
+ exitcode : 1 (pid: 214253)
527
+ error_file: <N/A>
528
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
529
+ ============================================================
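Editor's note: the `error_file: <N/A>` fields in the report above mean the worker entrypoint was not wrapped for error propagation; the linked elastic-errors page documents the `record` decorator for exactly this. A sketch of how the training script's entry function could be wrapped (the `main` name here is illustrative):

from torch.distributed.elastic.multiprocessing.errors import record

@record  # writes the child's traceback to an error file that torchrun can report
def main():
    ...  # existing training entrypoint body

if __name__ == "__main__":
    main()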
530
+ srun: error: ip-26-0-174-36: task 0: Exited with exit code 1
531
+ Consider using `hf_transfer` for faster uploads. This solution comes with some limitations. See https://huggingface.co/docs/huggingface_hub/hf_transfer for more details.
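Editor's note: the upload hint above is straightforward to act on; a small sketch, assuming `hf_transfer` has been installed (`pip install hf_transfer`) and that the flag is set before `huggingface_hub` is imported:

import os

os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"  # opt in to the Rust-based transfer backend

from huggingface_hub import HfApi  # import after setting the env var so it takes effect

api = HfApi()  # uploads via api.upload_file / api.upload_folder now use hf_transfer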
llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-128/status.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ oom