3outeille HF staff commited on
Commit
75f47fd
1 Parent(s): 7718b3d

Upload llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-8

Browse files
llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-8/bench.slurm ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/bin/bash
2
+
3
+ #SBATCH --job-name=bench_cluster
4
+ #SBATCH --time=00:59:00
5
+ #SBATCH --partition=hopper-prod
6
+ #SBATCH --nodes=2
7
+ #SBATCH --gres=gpu:8
8
+ #SBATCH --qos=high
9
+ #SBATCH --ntasks-per-node=1
10
+ #SBATCH --cpus-per-task=96
11
+ #SBATCH --exclusive
12
+ #SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-8/log.out
13
+ #SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-8/log.out
14
+
15
+ # Function to update status based on squeue output
16
+ update_status() {
17
+ job_id=$1
18
+ status_file=$2
19
+ # For unknown reasons, it doenst update status for pending. It only works for running
20
+ while true; do
21
+ job_status=$(squeue --job $job_id --noheader --format=%T)
22
+ echo "Job status: $job_status"
23
+ if [ -z "$job_status" ]; then
24
+ # Job has finished or is not found
25
+ break
26
+ elif [ "$job_status" = "RUNNING" ]; then
27
+ printf "running" > $status_file
28
+ break
29
+ fi
30
+ sleep 10
31
+ done
32
+ }
33
+
34
+ # Misc initializations.
35
+ echo "========================"
36
+ echo "START TIME: $(date)"
37
+ source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
38
+ conda activate /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster
39
+ echo python3 version = $(python3 --version)
40
+ echo "========================"
41
+
42
+ # Slurm stuff
43
+ export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
44
+ export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
45
+ export MASTER_PORT=$((1024 + RANDOM % 64511))
46
+
47
+ export TMPDIR=/scratch
48
+ export HF_DATASETS_CACHE="/admin/home/ferdinand_mom/.cache"
49
+ export CUBLAS_WORKSPACE_CONFIG=":4096:8"
50
+ export CUDA_DEVICE_MAX_CONNECTIONS="1"
51
+
52
+ huggingface-cli login --token $HUGGINGFACE_TOKEN
53
+
54
+
55
+ NANOTRON_REPO="/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron"
56
+ CMD="$NANOTRON_REPO/run_train.py --config-file /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-8/config.yaml"
57
+
58
+ LAUNCHER="torchrun \
59
+ --nproc_per_node 8 \
60
+ --nnodes 2 \
61
+ --rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} \
62
+ --rdzv_backend c10d \
63
+ --max_restarts 0 \
64
+ --tee 3 \
65
+ --node_rank ${SLURM_PROCID}"
66
+
67
+ # Checkout the bench_cluster branch
68
+ cd $NANOTRON_REPO
69
+ git checkout bench_cluster
70
+ cd ..
71
+ # Get the current job ID
72
+ job_id=${SLURM_JOB_ID}
73
+
74
+ # Update status to "pending" or "running" in the background
75
+ update_status $job_id /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-8/status.txt &
76
+
77
+ # Run the main command
78
+ srun -u $LAUNCHER $CMD
79
+ exit_status=$?
80
+
81
+ # Update status based on the exit status of `srun`
82
+ if [ $exit_status -eq 0 ]; then
83
+ printf "completed" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-8/status.txt
84
+ else
85
+ if grep -q "OutOfMemoryError" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-8/log.out; then
86
+ printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-8/status.txt
87
+ elif grep -q " CUDA error: an illegal memory access" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-8/log.out; then
88
+ printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-8/status.txt
89
+ elif grep -q "Timeout at NCCL" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-8/log.out; then
90
+ printf "timeout" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-8/status.txt
91
+ else
92
+ printf "fail" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-8/status.txt
93
+ fi
94
+ fi
95
+
96
+ # Run the report script if the job completed successfully
97
+ if [ $exit_status -eq 0 ]; then
98
+ python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-8 --is_logs
99
+ python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-8 --is_profiler
100
+ fi
101
+
102
+
103
+ # Push to hub the folder using huggingface_cli
104
+ huggingface-cli upload nanotron/bench_cluster /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-8 llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-8 --commit-message "Upload llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-8"
105
+
106
+ # Verify the upload
107
+ if [ $? -eq 0 ]; then
108
+ echo "Uploading to Huggingface Hub successful"
109
+ else
110
+ echo "Failed to upload to Huggingface Hub"
111
+ fi
llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-8/config.yaml ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ general:
2
+ project: bench_cluster
3
+ seed: 42
4
+ model:
5
+ ddp_bucket_cap_mb: 25
6
+ dtype: bfloat16
7
+ init_method:
8
+ std: 0.025
9
+ make_vocab_size_divisible_by: 1
10
+ model_config:
11
+ bos_token_id: 1
12
+ eos_token_id: 2
13
+ hidden_act: silu
14
+ hidden_size: 2048
15
+ initializer_range: 0.02
16
+ intermediate_size: 4096
17
+ is_llama_config: true
18
+ max_position_embeddings: 4096
19
+ num_attention_heads: 32
20
+ num_hidden_layers: 24
21
+ num_key_value_heads: 32
22
+ pad_token_id: null
23
+ pretraining_tp: 1
24
+ rms_norm_eps: 1.0e-05
25
+ rope_scaling: null
26
+ rope_theta: 10000.0
27
+ tie_word_embeddings: true
28
+ use_cache: true
29
+ vocab_size: 50257
30
+ optimizer:
31
+ accumulate_grad_in_fp32: true
32
+ clip_grad: 1.0
33
+ learning_rate_scheduler:
34
+ learning_rate: 0.0001
35
+ lr_decay_style: linear
36
+ lr_warmup_style: linear
37
+ lr_warmup_steps: 1
38
+ min_decay_lr: 1.0e-05
39
+ optimizer_factory:
40
+ adam_beta1: 0.9
41
+ adam_beta2: 0.95
42
+ adam_eps: 1.0e-08
43
+ name: adamW
44
+ torch_adam_is_fused: true
45
+ weight_decay: 0.01
46
+ zero_stage: 1
47
+ parallelism:
48
+ dp: 4
49
+ expert_parallel_size: 1
50
+ pp: 1
51
+ pp_engine: 1f1b
52
+ tp: 4
53
+ tp_linear_async_communication: false
54
+ tp_mode: REDUCE_SCATTER
55
+ profiler:
56
+ profiler_export_path: /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-8
57
+ tokenizer:
58
+ tokenizer_max_length: null
59
+ tokenizer_name_or_path: openai-community/gpt2
60
+ tokenizer_revision: null
61
+ data_stages:
62
+ - name: Training Stage
63
+ start_training_step: 1
64
+ data:
65
+ dataset:
66
+ dataset_overwrite_cache: false
67
+ dataset_processing_num_proc_per_process: 64
68
+ hf_dataset_config_name: null
69
+ hf_dataset_or_datasets: roneneldan/TinyStories
70
+ hf_dataset_splits: train
71
+ text_column_name: text
72
+ num_loading_workers: 32
73
+ seed: 42
74
+ lighteval: null
75
+ tokens:
76
+ train_steps: 20
77
+ val_check_interval: -1
78
+ batch_accumulation_per_replica: 32
79
+ limit_test_batches: 0
80
+ limit_val_batches: 0
81
+ micro_batch_size: 8
82
+ sequence_length: 4096
83
+ logging:
84
+ iteration_step_info_interval: 1
85
+ log_level: info
86
+ log_level_replica: info
87
+ checkpoints:
88
+ checkpoint_interval: 100000
89
+ checkpoints_path: /dev/null
90
+ resume_checkpoint_path: null
llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-8/log.out ADDED
@@ -0,0 +1,752 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ========================
2
+ START TIME: Tue Jul 2 16:32:40 UTC 2024
3
+ python3 version = Python 3.10.14
4
+ ========================
5
+ The token has not been saved to the git credentials helper. Pass `add_to_git_credential=True` in this function directly or `--add-to-git-credential` if using via `huggingface-cli` if you want to set the git credential as well.
6
+ Token is valid (permission: write).
7
+ Your token has been saved to /admin/home/ferdinand_mom/.cache/huggingface/token
8
+ Login successful
9
+ Already on 'bench_cluster'
10
+ M examples/config_tiny_llama.py
11
+ M examples/config_tiny_llama.yaml
12
+ M examples/train_tiny_llama.sh
13
+ M src/nanotron/models/llama.py
14
+ M src/nanotron/trainer.py
15
+ Your branch is up to date with 'origin/bench_cluster'.
16
+ Job status: RUNNING
17
+ W0702 16:32:42.731000 139765262067520 torch/distributed/run.py:757]
18
+ W0702 16:32:42.731000 139765262067520 torch/distributed/run.py:757] *****************************************
19
+ W0702 16:32:42.731000 139765262067520 torch/distributed/run.py:757] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
20
+ W0702 16:32:42.731000 139765262067520 torch/distributed/run.py:757] *****************************************
21
+ W0702 16:32:42.758000 140347947353920 torch/distributed/run.py:757]
22
+ W0702 16:32:42.758000 140347947353920 torch/distributed/run.py:757] *****************************************
23
+ W0702 16:32:42.758000 140347947353920 torch/distributed/run.py:757] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
24
+ W0702 16:32:42.758000 140347947353920 torch/distributed/run.py:757] *****************************************
25
+ [default0]:07/02/2024 16:33:01 [WARNING|DP=0|PP=0|TP=0|ip-26-0-163-43]: [Vocab Size Padding] Padded vocab (size: 50257) with 3 dummy tokens (new size: 50260)
26
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: Config:
27
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: Config(general=GeneralArgs(project='bench_cluster',
28
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: run='%date_%jobid',
29
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: seed=42,
30
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: step=None,
31
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: consumed_train_samples=None,
32
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: benchmark_csv_path=None,
33
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: ignore_sanity_checks=True),
34
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: parallelism=ParallelismArgs(dp=4,
35
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: pp=1,
36
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: tp=4,
37
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: pp_engine=<nanotron.parallel.pipeline_parallel.engine.OneForwardOneBackwardPipelineEngine object at 0x7f25b82ec910>,
38
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: tp_mode=<TensorParallelLinearMode.REDUCE_SCATTER: 2>,
39
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: tp_linear_async_communication=False,
40
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: expert_parallel_size=1),
41
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: model=ModelArgs(model_config=LlamaConfig(bos_token_id=1,
42
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: eos_token_id=2,
43
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: hidden_act='silu',
44
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: hidden_size=2048,
45
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: initializer_range=0.02,
46
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: intermediate_size=4096,
47
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: is_llama_config=True,
48
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: max_position_embeddings=4096,
49
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: num_attention_heads=32,
50
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: num_hidden_layers=24,
51
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: num_key_value_heads=32,
52
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: pad_token_id=None,
53
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: pretraining_tp=1,
54
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: rms_norm_eps=1e-05,
55
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: rope_scaling=None,
56
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: rope_theta=10000.0,
57
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: tie_word_embeddings=True,
58
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: use_cache=True,
59
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: vocab_size=50260),
60
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: init_method=RandomInit(std=0.025),
61
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: dtype=torch.bfloat16,
62
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: make_vocab_size_divisible_by=1,
63
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: ddp_bucket_cap_mb=25),
64
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: tokenizer=TokenizerArgs(tokenizer_name_or_path='openai-community/gpt2',
65
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: tokenizer_revision=None,
66
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: tokenizer_max_length=None),
67
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: checkpoints=CheckpointsArgs(checkpoints_path=Path('/dev/null'),
68
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: checkpoint_interval=100000,
69
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: save_initial_state=False,
70
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: resume_checkpoint_path=None,
71
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: checkpoints_path_is_shared_file_system=False),
72
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: logging=LoggingArgs(log_level='info',
73
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: log_level_replica='info',
74
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: iteration_step_info_interval=1),
75
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: tokens=TokensArgs(sequence_length=4096,
76
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: train_steps=20,
77
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: micro_batch_size=8,
78
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: batch_accumulation_per_replica=32,
79
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: val_check_interval=-1,
80
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: limit_val_batches=0,
81
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: limit_test_batches=0),
82
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: optimizer=OptimizerArgs(optimizer_factory=AdamWOptimizerArgs(adam_eps=1e-08,
83
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: adam_beta1=0.9,
84
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: adam_beta2=0.95,
85
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: torch_adam_is_fused=True,
86
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: name='adamW'),
87
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: zero_stage=1,
88
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: weight_decay=0.01,
89
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: clip_grad=1.0,
90
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: accumulate_grad_in_fp32=True,
91
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: learning_rate_scheduler=LRSchedulerArgs(learning_rate=0.0001,
92
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: lr_warmup_steps=1,
93
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: lr_warmup_style='linear',
94
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: lr_decay_style='linear',
95
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: lr_decay_steps=19,
96
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: lr_decay_starting_step=None,
97
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: min_decay_lr=1e-05)),
98
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: data_stages=[DatasetStageArgs(name='Training Stage',
99
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: start_training_step=1,
100
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: data=DataArgs(dataset=PretrainDatasetsArgs(hf_dataset_or_datasets='roneneldan/TinyStories',
101
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: hf_dataset_splits='train',
102
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: hf_dataset_config_name=None,
103
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: dataset_processing_num_proc_per_process=64,
104
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: dataset_overwrite_cache=False,
105
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: text_column_name='text'),
106
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: seed=42,
107
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: num_loading_workers=32))],
108
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: profiler=ProfilerArgs(profiler_export_path=Path('/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-8')),
109
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: lighteval=None)
110
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: Model Config:
111
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: LlamaConfig(bos_token_id=1,
112
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: eos_token_id=2,
113
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: hidden_act='silu',
114
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: hidden_size=2048,
115
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: initializer_range=0.02,
116
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: intermediate_size=4096,
117
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: is_llama_config=True,
118
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: max_position_embeddings=4096,
119
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: num_attention_heads=32,
120
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: num_hidden_layers=24,
121
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: num_key_value_heads=32,
122
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: pad_token_id=None,
123
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: pretraining_tp=1,
124
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: rms_norm_eps=1e-05,
125
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: rope_scaling=None,
126
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: rope_theta=10000.0,
127
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: tie_word_embeddings=True,
128
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: use_cache=True,
129
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: vocab_size=50260)
130
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: Building model..
131
+ [default0]:07/02/2024 16:33:01 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: Setting PP block ranks...
132
+ [default1]:07/02/2024 16:33:14 [INFO|DP=0|PP=0|TP=1|ip-26-0-163-43]: Local number of parameters: 277M (529.27MiB)
133
+ [default1]:07/02/2024 16:33:14 [INFO|DP=0|PP=0|TP=1|ip-26-0-163-43]: [After model building] Memory usage: 554.21MiB. Peak allocated: 606.24MiB Peak reserved: 608.00MiB
134
+ [default1]:07/02/2024 16:33:14 [INFO|DP=0|PP=0|TP=1|ip-26-0-163-43]: No checkpoint path provided.
135
+ [default0]:07/02/2024 16:33:14 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: Total number of parameters: 1.11G (2117.09MiB)
136
+ [default0]:07/02/2024 16:33:14 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: Local number of parameters: 277M (529.27MiB)
137
+ [default0]:07/02/2024 16:33:14 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: [After model building] Memory usage: 554.21MiB. Peak allocated: 606.24MiB Peak reserved: 608.00MiB
138
+ [default0]:07/02/2024 16:33:14 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: No checkpoint path provided.
139
+ [default0]:07/02/2024 16:33:14 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: Parametrizing model parameters using StandardParametrizator
140
+ [default3]:07/02/2024 16:33:14 [INFO|DP=0|PP=0|TP=3|ip-26-0-163-43]: Local number of parameters: 277M (529.27MiB)
141
+ [default3]:07/02/2024 16:33:14 [INFO|DP=0|PP=0|TP=3|ip-26-0-163-43]: [After model building] Memory usage: 554.21MiB. Peak allocated: 606.24MiB Peak reserved: 608.00MiB
142
+ [default3]:07/02/2024 16:33:14 [INFO|DP=0|PP=0|TP=3|ip-26-0-163-43]: No checkpoint path provided.
143
+ [default2]:07/02/2024 16:33:14 [INFO|DP=0|PP=0|TP=2|ip-26-0-163-43]: Local number of parameters: 277M (529.27MiB)
144
+ [default2]:07/02/2024 16:33:14 [INFO|DP=0|PP=0|TP=2|ip-26-0-163-43]: [After model building] Memory usage: 554.21MiB. Peak allocated: 606.24MiB Peak reserved: 608.00MiB
145
+ [default2]:07/02/2024 16:33:14 [INFO|DP=0|PP=0|TP=2|ip-26-0-163-43]: No checkpoint path provided.
146
+ [default2]:07/02/2024 16:33:14 [INFO|DP=2|PP=0|TP=2|ip-26-0-169-207]: No checkpoint path provided.
147
+ [default0]:07/02/2024 16:33:14 [INFO|DP=2|PP=0|TP=0|ip-26-0-169-207]: No checkpoint path provided.
148
+ [default3]:07/02/2024 16:33:14 [INFO|DP=2|PP=0|TP=3|ip-26-0-169-207]: No checkpoint path provided.
149
+ [default1]:07/02/2024 16:33:14 [INFO|DP=2|PP=0|TP=1|ip-26-0-169-207]: No checkpoint path provided.
150
+ [default5]:07/02/2024 16:33:14 [INFO|DP=1|PP=0|TP=1|ip-26-0-163-43]: No checkpoint path provided.
151
+ [default6]:07/02/2024 16:33:14 [INFO|DP=1|PP=0|TP=2|ip-26-0-163-43]: No checkpoint path provided.
152
+ [default4]:07/02/2024 16:33:14 [INFO|DP=1|PP=0|TP=0|ip-26-0-163-43]: No checkpoint path provided.
153
+ [default7]:07/02/2024 16:33:14 [INFO|DP=1|PP=0|TP=3|ip-26-0-163-43]: No checkpoint path provided.
154
+ [default6]:07/02/2024 16:33:14 [INFO|DP=3|PP=0|TP=2|ip-26-0-169-207]: No checkpoint path provided.
155
+ [default4]:07/02/2024 16:33:14 [INFO|DP=3|PP=0|TP=0|ip-26-0-169-207]: No checkpoint path provided.
156
+ [default5]:07/02/2024 16:33:14 [INFO|DP=3|PP=0|TP=1|ip-26-0-169-207]: No checkpoint path provided.
157
+ [default7]:07/02/2024 16:33:14 [INFO|DP=3|PP=0|TP=3|ip-26-0-169-207]: No checkpoint path provided.
158
+ [default0]:07/02/2024 16:33:16 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: [Optimizer Building] Using LearningRateForSP as learning rate
159
+ [default0]:07/02/2024 16:33:16 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: [ZeRO sharding] Size of optimizer params per rank:
160
+ [default0]:07/02/2024 16:33:16 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: [ZeRO sharding] DP Rank 0 has 69.4M out of 277M (25.00%) params' optimizer states
161
+ [default0]:07/02/2024 16:33:16 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: [ZeRO sharding] DP Rank 1 has 69.4M out of 277M (25.00%) params' optimizer states
162
+ [default0]:07/02/2024 16:33:16 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: [ZeRO sharding] DP Rank 2 has 69.4M out of 277M (25.00%) params' optimizer states
163
+ [default0]:07/02/2024 16:33:16 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: [ZeRO sharding] DP Rank 3 has 69.4M out of 277M (25.00%) params' optimizer states
164
+ [default0]:07/02/2024 16:33:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: [Training Plan] Stage Training Stage has 19 remaining training steps and has consumed 0 samples
165
+ [default0]:07/02/2024 16:33:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: Using `datasets` library
166
+ [default0]:07/02/2024 16:33:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: Loading tokenizer from openai-community/gpt2 and transformers/hf_hub versions ('4.41.2', '0.23.4')
167
+ [default0]:07/02/2024 16:33:18 [WARNING|DP=0|PP=0|TP=0|ip-26-0-163-43]: Repo card metadata block was not found. Setting CardData to empty.
168
+ [default0]:Repo card metadata block was not found. Setting CardData to empty.
169
+ [default0]:07/02/2024 16:33:19 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: [Training Plan] There are 1 training stages
170
+ [default0]:07/02/2024 16:33:19 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: [Stage Training Stage] start from step 1
171
+ [default0]:07/02/2024 16:33:19 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]:
172
+ [default0]:07/02/2024 16:33:19 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: [Start training] datetime: 2024-07-02 16:33:19.645675 | mbs: 8 | grad_accum: 32 | global_batch_size: 1024 | sequence_length: 4096 | train_steps: 20 | start_iteration_step: 0 | consumed_train_samples: 0
173
+ [default0]:07/02/2024 16:33:19 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: Resuming training from stage Training Stage, it has trained for 0 samples and has 19 remaining train steps
174
+ [default0]:07/02/2024 16:33:19 [INFO|DP=0|PP=0|TP=0|ip-26-0-163-43]: Memory usage: 1877.40MiB. Peak allocated 1877.40MiB. Peak reserved: 1934.00MiB
175
+ [default1]:07/02/2024 16:33:19 [WARNING|DP=0|PP=0|TP=1|ip-26-0-163-43]: Repo card metadata block was not found. Setting CardData to empty.
176
+ [default5]:07/02/2024 16:33:19 [WARNING|DP=1|PP=0|TP=1|ip-26-0-163-43]: Repo card metadata block was not found. Setting CardData to empty.
177
+ [default4]:Repo card metadata block was not found. Setting CardData to empty.
178
+ [default6]:07/02/2024 16:33:19 [WARNING|DP=3|PP=0|TP=2|ip-26-0-169-207]: Repo card metadata block was not found. Setting CardData to empty.
179
+ [default2]:07/02/2024 16:33:19 [WARNING|DP=2|PP=0|TP=2|ip-26-0-169-207]: Repo card metadata block was not found. Setting CardData to empty.
180
+ [default3]:07/02/2024 16:33:19 [WARNING|DP=2|PP=0|TP=3|ip-26-0-169-207]: Repo card metadata block was not found. Setting CardData to empty.
181
+ [default0]:07/02/2024 16:33:19 [WARNING|DP=2|PP=0|TP=0|ip-26-0-169-207]: Repo card metadata block was not found. Setting CardData to empty.
182
+ [default1]:07/02/2024 16:33:19 [WARNING|DP=2|PP=0|TP=1|ip-26-0-169-207]: Repo card metadata block was not found. Setting CardData to empty.
183
+ [default4]:07/02/2024 16:33:19 [WARNING|DP=3|PP=0|TP=0|ip-26-0-169-207]: Repo card metadata block was not found. Setting CardData to empty.
184
+ [default6]:Repo card metadata block was not found. Setting CardData to empty.
185
+ [default5]:07/02/2024 16:33:19 [WARNING|DP=3|PP=0|TP=1|ip-26-0-169-207]: Repo card metadata block was not found. Setting CardData to empty.
186
+ [default0]:Repo card metadata block was not found. Setting CardData to empty.
187
+ [default7]:Repo card metadata block was not found. Setting CardData to empty.
188
+ [default6]:07/02/2024 16:33:19 [WARNING|DP=1|PP=0|TP=2|ip-26-0-163-43]: Repo card metadata block was not found. Setting CardData to empty.
189
+ [default7]:Repo card metadata block was not found. Setting CardData to empty.
190
+ [default6]:Repo card metadata block was not found. Setting CardData to empty.
191
+ [default2]:07/02/2024 16:33:19 [WARNING|DP=0|PP=0|TP=2|ip-26-0-163-43]: Repo card metadata block was not found. Setting CardData to empty.
192
+ [default2]:Repo card metadata block was not found. Setting CardData to empty.
193
+ [default4]:07/02/2024 16:33:19 [WARNING|DP=1|PP=0|TP=0|ip-26-0-163-43]: Repo card metadata block was not found. Setting CardData to empty.
194
+ [default5]:Repo card metadata block was not found. Setting CardData to empty.
195
+ [default7]:07/02/2024 16:33:19 [WARNING|DP=1|PP=0|TP=3|ip-26-0-163-43]: Repo card metadata block was not found. Setting CardData to empty.
196
+ [default4]:Repo card metadata block was not found. Setting CardData to empty.
197
+ [default1]:Repo card metadata block was not found. Setting CardData to empty.
198
+ [default2]:Repo card metadata block was not found. Setting CardData to empty.
199
+ [default7]:07/02/2024 16:33:19 [WARNING|DP=3|PP=0|TP=3|ip-26-0-169-207]: Repo card metadata block was not found. Setting CardData to empty.
200
+ [default3]:Repo card metadata block was not found. Setting CardData to empty.
201
+ [default5]:Repo card metadata block was not found. Setting CardData to empty.
202
+ [default1]:Repo card metadata block was not found. Setting CardData to empty.
203
+ [default3]:Repo card metadata block was not found. Setting CardData to empty.
204
+ [default3]:07/02/2024 16:33:20 [WARNING|DP=0|PP=0|TP=3|ip-26-0-163-43]: Repo card metadata block was not found. Setting CardData to empty.
205
+ [default7]:[rank15]: OSError: [Errno 122] Disk quota exceeded
206
+ [default7]:
207
+ [default7]:[rank15]: During handling of the above exception, another exception occurred:
208
+ [default7]:
209
+ [default7]:[rank15]: Traceback (most recent call last):
210
+ [default7]:[rank15]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
211
+ [default7]:[rank15]: trainer.train(dataloader)
212
+ [default7]:[rank15]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
213
+ [default7]:[rank15]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
214
+ [default7]:[rank15]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
215
+ [default7]:[rank15]: outputs = self.pipeline_engine.train_batch_iter(
216
+ [default7]:[rank15]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
217
+ [default7]:[rank15]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
218
+ [default7]:[rank15]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
219
+ [default7]:[rank15]: output = model(**micro_batch)
220
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
221
+ [default7]:[rank15]: return self._call_impl(*args, **kwargs)
222
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
223
+ [default7]:[rank15]: return forward_call(*args, **kwargs)
224
+ [default7]:[rank15]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
225
+ [default7]:[rank15]: sharded_logits = self.model(
226
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
227
+ [default7]:[rank15]: return self._call_impl(*args, **kwargs)
228
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
229
+ [default7]:[rank15]: return forward_call(*args, **kwargs)
230
+ [default7]:[rank15]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
231
+ [default7]:[rank15]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
232
+ [default7]:[rank15]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
233
+ [default7]:[rank15]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
234
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
235
+ [default7]:[rank15]: return self._call_impl(*args, **kwargs)
236
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
237
+ [default7]:[rank15]: return forward_call(*args, **kwargs)
238
+ [default7]:[rank15]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
239
+ [default7]:[rank15]: output = self.pp_block(**new_kwargs)
240
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
241
+ [default7]:[rank15]: return self._call_impl(*args, **kwargs)
242
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
243
+ [default7]:[rank15]: return forward_call(*args, **kwargs)
244
+ [default7]:[rank15]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 629, in forward
245
+ [default7]:[rank15]: hidden_states = self.input_layernorm(hidden_states)
246
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
247
+ [default7]:[rank15]: return self._call_impl(*args, **kwargs)
248
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
249
+ [default7]:[rank15]: return forward_call(*args, **kwargs)
250
+ [default7]:[rank15]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/nn/layer_norm.py", line 42, in forward
251
+ [default7]:[rank15]: return layer_norm_fn(
252
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 875, in layer_norm_fn
253
+ [default7]:[rank15]: return LayerNormFn.apply(
254
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/function.py", line 598, in apply
255
+ [default7]:[rank15]: return super().apply(*args, **kwargs) # type: ignore[misc]
256
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 748, in forward
257
+ [default7]:[rank15]: y, y1, mean, rstd, residual_out, seeds, dropout_mask, dropout_mask1 = _layer_norm_fwd(
258
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 335, in _layer_norm_fwd
259
+ [default7]:[rank15]: _layer_norm_fwd_1pass_kernel[(M,)](
260
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 167, in <lambda>
261
+ [default7]:[rank15]: return lambda *args, **kwargs: self.run(grid=grid, warmup=False, *args, **kwargs)
262
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in run
263
+ [default7]:[rank15]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
264
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in <dictcomp>
265
+ [default7]:[rank15]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
266
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 122, in _bench
267
+ [default7]:[rank15]: return do_bench(kernel_call, warmup=self.warmup, rep=self.rep, quantiles=(0.5, 0.2, 0.8))
268
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/testing.py", line 102, in do_bench
269
+ [default7]:[rank15]: fn()
270
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 110, in kernel_call
271
+ [default7]:[rank15]: self.fn.run(
272
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
273
+ [default7]:[rank15]: return self.fn.run(*args, **kwargs)
274
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
275
+ [default7]:[rank15]: return self.fn.run(*args, **kwargs)
276
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
277
+ [default7]:[rank15]: return self.fn.run(*args, **kwargs)
278
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 416, in run
279
+ [default7]:[rank15]: self.cache[device][key] = compile(
280
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/compiler/compiler.py", line 194, in compile
281
+ [default7]:[rank15]: metadata_group[f"{src.name}.{ext}"] = fn_cache_manager.put(next_module, f"{src.name}.{ext}")
282
+ [default7]:[rank15]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/cache.py", line 123, in put
283
+ [default7]:[rank15]: with open(temp_path, mode) as f:
284
+ [default7]:[rank15]: OSError: [Errno 122] Disk quota exceeded
285
+ [default4]:[rank12]: OSError: [Errno 122] Disk quota exceeded
286
+ [default4]:
287
+ [default4]:[rank12]: During handling of the above exception, another exception occurred:
288
+ [default4]:
289
+ [default4]:[rank12]: Traceback (most recent call last):
290
+ [default4]:[rank12]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
291
+ [default4]:[rank12]: trainer.train(dataloader)
292
+ [default4]:[rank12]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
293
+ [default4]:[rank12]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
294
+ [default4]:[rank12]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
295
+ [default4]:[rank12]: outputs = self.pipeline_engine.train_batch_iter(
296
+ [default4]:[rank12]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
297
+ [default4]:[rank12]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
298
+ [default4]:[rank12]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
299
+ [default4]:[rank12]: output = model(**micro_batch)
300
+ [default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
301
+ [default4]:[rank12]: return self._call_impl(*args, **kwargs)
302
+ [default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
303
+ [default4]:[rank12]: return forward_call(*args, **kwargs)
304
+ [default4]:[rank12]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
305
+ [default4]:[rank12]: sharded_logits = self.model(
306
+ [default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
307
+ [default4]:[rank12]: return self._call_impl(*args, **kwargs)
308
+ [default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
309
+ [default4]:[rank12]: return forward_call(*args, **kwargs)
310
+ [default4]:[rank12]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
311
+ [default4]:[rank12]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
312
+ [default4]:[rank12]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
313
+ [default4]:[rank12]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
314
+ [default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
315
+ [default4]:[rank12]: return self._call_impl(*args, **kwargs)
316
+ [default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
317
+ [default4]:[rank12]: return forward_call(*args, **kwargs)
318
+ [default4]:[rank12]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
319
+ [default4]:[rank12]: output = self.pp_block(**new_kwargs)
320
+ [default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
321
+ [default4]:[rank12]: return self._call_impl(*args, **kwargs)
322
+ [default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
323
+ [default4]:[rank12]: return forward_call(*args, **kwargs)
324
+ [default4]:[rank12]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 629, in forward
325
+ [default4]:[rank12]: hidden_states = self.input_layernorm(hidden_states)
326
+ [default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
327
+ [default4]:[rank12]: return self._call_impl(*args, **kwargs)
328
+ [default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
329
+ [default4]:[rank12]: return forward_call(*args, **kwargs)
330
+ [default4]:[rank12]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/nn/layer_norm.py", line 42, in forward
331
+ [default4]:[rank12]: return layer_norm_fn(
332
+ [default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 875, in layer_norm_fn
333
+ [default4]:[rank12]: return LayerNormFn.apply(
334
+ [default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/function.py", line 598, in apply
335
+ [default4]:[rank12]: return super().apply(*args, **kwargs) # type: ignore[misc]
336
+ [default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 748, in forward
337
+ [default4]:[rank12]: y, y1, mean, rstd, residual_out, seeds, dropout_mask, dropout_mask1 = _layer_norm_fwd(
338
+ [default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 335, in _layer_norm_fwd
339
+ [default4]:[rank12]: _layer_norm_fwd_1pass_kernel[(M,)](
340
+ [default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 167, in <lambda>
341
+ [default4]:[rank12]: return lambda *args, **kwargs: self.run(grid=grid, warmup=False, *args, **kwargs)
342
+ [default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in run
343
+ [default4]:[rank12]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
344
+ [default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in <dictcomp>
345
+ [default4]:[rank12]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
346
+ [default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 122, in _bench
347
+ [default4]:[rank12]: return do_bench(kernel_call, warmup=self.warmup, rep=self.rep, quantiles=(0.5, 0.2, 0.8))
348
+ [default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/testing.py", line 102, in do_bench
349
+ [default4]:[rank12]: fn()
350
+ [default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 110, in kernel_call
351
+ [default4]:[rank12]: self.fn.run(
352
+ [default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
353
+ [default4]:[rank12]: return self.fn.run(*args, **kwargs)
354
+ [default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
355
+ [default4]:[rank12]: return self.fn.run(*args, **kwargs)
356
+ [default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
357
+ [default4]:[rank12]: return self.fn.run(*args, **kwargs)
358
+ [default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 416, in run
359
+ [default4]:[rank12]: self.cache[device][key] = compile(
360
+ [default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/compiler/compiler.py", line 194, in compile
361
+ [default4]:[rank12]: metadata_group[f"{src.name}.{ext}"] = fn_cache_manager.put(next_module, f"{src.name}.{ext}")
362
+ [default4]:[rank12]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/cache.py", line 123, in put
363
+ [default4]:[rank12]: with open(temp_path, mode) as f:
364
+ [default4]:[rank12]: OSError: [Errno 122] Disk quota exceeded
365
+ [default1]:[rank1]: OSError: [Errno 122] Disk quota exceeded
366
+ [default1]:
367
+ [default1]:[rank1]: During handling of the above exception, another exception occurred:
368
+ [default1]:
369
+ [default1]:[rank1]: Traceback (most recent call last):
370
+ [default1]:[rank1]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
371
+ [default1]:[rank1]: trainer.train(dataloader)
372
+ [default1]:[rank1]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
373
+ [default1]:[rank1]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
374
+ [default1]:[rank1]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
375
+ [default1]:[rank1]: outputs = self.pipeline_engine.train_batch_iter(
376
+ [default1]:[rank1]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
377
+ [default1]:[rank1]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
378
+ [default1]:[rank1]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
379
+ [default1]:[rank1]: output = model(**micro_batch)
380
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
381
+ [default1]:[rank1]: return self._call_impl(*args, **kwargs)
382
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
383
+ [default1]:[rank1]: return forward_call(*args, **kwargs)
384
+ [default1]:[rank1]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
385
+ [default1]:[rank1]: sharded_logits = self.model(
386
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
387
+ [default1]:[rank1]: return self._call_impl(*args, **kwargs)
388
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
389
+ [default1]:[rank1]: return forward_call(*args, **kwargs)
390
+ [default1]:[rank1]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
391
+ [default1]:[rank1]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
392
+ [default1]:[rank1]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
393
+ [default1]:[rank1]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
394
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
395
+ [default1]:[rank1]: return self._call_impl(*args, **kwargs)
396
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
397
+ [default1]:[rank1]: return forward_call(*args, **kwargs)
398
+ [default1]:[rank1]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
399
+ [default1]:[rank1]: output = self.pp_block(**new_kwargs)
400
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
401
+ [default1]:[rank1]: return self._call_impl(*args, **kwargs)
402
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
403
+ [default1]:[rank1]: return forward_call(*args, **kwargs)
404
+ [default1]:[rank1]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 629, in forward
405
+ [default1]:[rank1]: hidden_states = self.input_layernorm(hidden_states)
406
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
407
+ [default1]:[rank1]: return self._call_impl(*args, **kwargs)
408
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
409
+ [default1]:[rank1]: return forward_call(*args, **kwargs)
410
+ [default1]:[rank1]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/nn/layer_norm.py", line 42, in forward
411
+ [default1]:[rank1]: return layer_norm_fn(
412
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 875, in layer_norm_fn
413
+ [default1]:[rank1]: return LayerNormFn.apply(
414
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/function.py", line 598, in apply
415
+ [default1]:[rank1]: return super().apply(*args, **kwargs) # type: ignore[misc]
416
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 748, in forward
417
+ [default1]:[rank1]: y, y1, mean, rstd, residual_out, seeds, dropout_mask, dropout_mask1 = _layer_norm_fwd(
418
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 335, in _layer_norm_fwd
419
+ [default1]:[rank1]: _layer_norm_fwd_1pass_kernel[(M,)](
420
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 167, in <lambda>
421
+ [default1]:[rank1]: return lambda *args, **kwargs: self.run(grid=grid, warmup=False, *args, **kwargs)
422
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in run
423
+ [default1]:[rank1]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
424
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in <dictcomp>
425
+ [default1]:[rank1]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
426
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 122, in _bench
427
+ [default1]:[rank1]: return do_bench(kernel_call, warmup=self.warmup, rep=self.rep, quantiles=(0.5, 0.2, 0.8))
428
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/testing.py", line 102, in do_bench
429
+ [default1]:[rank1]: fn()
430
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 110, in kernel_call
431
+ [default1]:[rank1]: self.fn.run(
432
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
433
+ [default1]:[rank1]: return self.fn.run(*args, **kwargs)
434
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
435
+ [default1]:[rank1]: return self.fn.run(*args, **kwargs)
436
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
437
+ [default1]:[rank1]: return self.fn.run(*args, **kwargs)
438
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 416, in run
439
+ [default1]:[rank1]: self.cache[device][key] = compile(
440
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/compiler/compiler.py", line 194, in compile
441
+ [default1]:[rank1]: metadata_group[f"{src.name}.{ext}"] = fn_cache_manager.put(next_module, f"{src.name}.{ext}")
442
+ [default1]:[rank1]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/cache.py", line 123, in put
443
+ [default1]:[rank1]: with open(temp_path, mode) as f:
444
+ [default1]:[rank1]: OSError: [Errno 122] Disk quota exceeded
445
+ [default4]:[rank4]: OSError: [Errno 122] Disk quota exceeded
446
+ [default4]:
447
+ [default4]:[rank4]: During handling of the above exception, another exception occurred:
448
+ [default4]:
449
+ [default4]:[rank4]: Traceback (most recent call last):
450
+ [default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
451
+ [default4]:[rank4]: trainer.train(dataloader)
452
+ [default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
453
+ [default4]:[rank4]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
454
+ [default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
455
+ [default4]:[rank4]: outputs = self.pipeline_engine.train_batch_iter(
456
+ [default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 295, in train_batch_iter
457
+ [default4]:[rank4]: self.backward(context=context, state=state, grad_accumulator=grad_accumulator)
458
+ [default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 86, in backward
459
+ [default4]:[rank4]: grad_accumulator.backward(sum(activations))
460
+ [default4]:[rank4]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/optim/gradient_accumulator.py", line 205, in backward
461
+ [default4]:[rank4]: result = loss.backward()
462
+ [default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/_tensor.py", line 525, in backward
463
+ [default4]:[rank4]: torch.autograd.backward(
464
+ [default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/__init__.py", line 267, in backward
465
+ [default4]:[rank4]: _engine_run_backward(
466
+ [default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/graph.py", line 744, in _engine_run_backward
467
+ [default4]:[rank4]: return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
468
+ [default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/function.py", line 301, in apply
469
+ [default4]:[rank4]: return user_fn(self, *args)
470
+ [default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 821, in backward
471
+ [default4]:[rank4]: dx, dw, db, dresidual_in, dx1, dw1, db1 = _layer_norm_bwd(
472
+ [default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 643, in _layer_norm_bwd
473
+ [default4]:[rank4]: _layer_norm_bwd_kernel[grid](
474
+ [default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 167, in <lambda>
475
+ [default4]:[rank4]: return lambda *args, **kwargs: self.run(grid=grid, warmup=False, *args, **kwargs)
476
+ [default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in run
477
+ [default4]:[rank4]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
478
+ [default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in <dictcomp>
479
+ [default4]:[rank4]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
480
+ [default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 122, in _bench
481
+ [default4]:[rank4]: return do_bench(kernel_call, warmup=self.warmup, rep=self.rep, quantiles=(0.5, 0.2, 0.8))
482
+ [default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/testing.py", line 102, in do_bench
483
+ [default4]:[rank4]: fn()
484
+ [default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 110, in kernel_call
485
+ [default4]:[rank4]: self.fn.run(
486
+ [default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
487
+ [default4]:[rank4]: return self.fn.run(*args, **kwargs)
488
+ [default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
489
+ [default4]:[rank4]: return self.fn.run(*args, **kwargs)
490
+ [default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
491
+ [default4]:[rank4]: return self.fn.run(*args, **kwargs)
492
+ [default4]:[rank4]: [Previous line repeated 2 more times]
493
+ [default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 416, in run
494
+ [default4]:[rank4]: self.cache[device][key] = compile(
495
+ [default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/compiler/compiler.py", line 194, in compile
496
+ [default4]:[rank4]: metadata_group[f"{src.name}.{ext}"] = fn_cache_manager.put(next_module, f"{src.name}.{ext}")
497
+ [default4]:[rank4]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/cache.py", line 123, in put
498
+ [default4]:[rank4]: with open(temp_path, mode) as f:
499
+ [default4]:[rank4]: OSError: [Errno 122] Disk quota exceeded
500
+ [default5]:[rank13]: OSError: [Errno 122] Disk quota exceeded
501
+ [default5]:
502
+ [default5]:[rank13]: During handling of the above exception, another exception occurred:
503
+ [default5]:
504
+ [default5]:[rank13]: Traceback (most recent call last):
505
+ [default5]:[rank13]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
506
+ [default5]:[rank13]: trainer.train(dataloader)
507
+ [default5]:[rank13]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
508
+ [default5]:[rank13]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
509
+ [default5]:[rank13]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
510
+ [default5]:[rank13]: outputs = self.pipeline_engine.train_batch_iter(
511
+ [default5]:[rank13]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
512
+ [default5]:[rank13]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
513
+ [default5]:[rank13]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
514
+ [default5]:[rank13]: output = model(**micro_batch)
515
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
516
+ [default5]:[rank13]: return self._call_impl(*args, **kwargs)
517
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
518
+ [default5]:[rank13]: return forward_call(*args, **kwargs)
519
+ [default5]:[rank13]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
520
+ [default5]:[rank13]: sharded_logits = self.model(
521
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
522
+ [default5]:[rank13]: return self._call_impl(*args, **kwargs)
523
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
524
+ [default5]:[rank13]: return forward_call(*args, **kwargs)
525
+ [default5]:[rank13]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
526
+ [default5]:[rank13]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
527
+ [default5]:[rank13]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
528
+ [default5]:[rank13]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
529
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
530
+ [default5]:[rank13]: return self._call_impl(*args, **kwargs)
531
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
532
+ [default5]:[rank13]: return forward_call(*args, **kwargs)
533
+ [default5]:[rank13]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
534
+ [default5]:[rank13]: output = self.pp_block(**new_kwargs)
535
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
536
+ [default5]:[rank13]: return self._call_impl(*args, **kwargs)
537
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
538
+ [default5]:[rank13]: return forward_call(*args, **kwargs)
539
+ [default5]:[rank13]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 629, in forward
540
+ [default5]:[rank13]: hidden_states = self.input_layernorm(hidden_states)
541
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
542
+ [default5]:[rank13]: return self._call_impl(*args, **kwargs)
543
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
544
+ [default5]:[rank13]: return forward_call(*args, **kwargs)
545
+ [default5]:[rank13]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/nn/layer_norm.py", line 42, in forward
546
+ [default5]:[rank13]: return layer_norm_fn(
547
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 875, in layer_norm_fn
548
+ [default5]:[rank13]: return LayerNormFn.apply(
549
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/function.py", line 598, in apply
550
+ [default5]:[rank13]: return super().apply(*args, **kwargs) # type: ignore[misc]
551
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 748, in forward
552
+ [default5]:[rank13]: y, y1, mean, rstd, residual_out, seeds, dropout_mask, dropout_mask1 = _layer_norm_fwd(
553
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 335, in _layer_norm_fwd
554
+ [default5]:[rank13]: _layer_norm_fwd_1pass_kernel[(M,)](
555
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 167, in <lambda>
556
+ [default5]:[rank13]: return lambda *args, **kwargs: self.run(grid=grid, warmup=False, *args, **kwargs)
557
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in run
558
+ [default5]:[rank13]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
559
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in <dictcomp>
560
+ [default5]:[rank13]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
561
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 122, in _bench
562
+ [default5]:[rank13]: return do_bench(kernel_call, warmup=self.warmup, rep=self.rep, quantiles=(0.5, 0.2, 0.8))
563
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/testing.py", line 102, in do_bench
564
+ [default5]:[rank13]: fn()
565
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 110, in kernel_call
566
+ [default5]:[rank13]: self.fn.run(
567
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
568
+ [default5]:[rank13]: return self.fn.run(*args, **kwargs)
569
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
570
+ [default5]:[rank13]: return self.fn.run(*args, **kwargs)
571
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
572
+ [default5]:[rank13]: return self.fn.run(*args, **kwargs)
573
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 416, in run
574
+ [default5]:[rank13]: self.cache[device][key] = compile(
575
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/compiler/compiler.py", line 194, in compile
576
+ [default5]:[rank13]: metadata_group[f"{src.name}.{ext}"] = fn_cache_manager.put(next_module, f"{src.name}.{ext}")
577
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/cache.py", line 123, in put
578
+ [default5]:[rank13]: with open(temp_path, mode) as f:
579
+ [default5]:[rank13]: OSError: [Errno 122] Disk quota exceeded
580
+ [default2]:[rank10]: OSError: [Errno 122] Disk quota exceeded
581
+ [default2]:
582
+ [default2]:[rank10]: During handling of the above exception, another exception occurred:
583
+ [default2]:
584
+ [default2]:[rank10]: Traceback (most recent call last):
585
+ [default2]:[rank10]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
586
+ [default2]:[rank10]: trainer.train(dataloader)
587
+ [default2]:[rank10]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
588
+ [default2]:[rank10]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
589
+ [default2]:[rank10]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
590
+ [default2]:[rank10]: outputs = self.pipeline_engine.train_batch_iter(
591
+ [default2]:[rank10]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
592
+ [default2]:[rank10]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
593
+ [default2]:[rank10]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
594
+ [default2]:[rank10]: output = model(**micro_batch)
595
+ [default2]:[rank10]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
596
+ [default2]:[rank10]: return self._call_impl(*args, **kwargs)
597
+ [default2]:[rank10]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
598
+ [default2]:[rank10]: return forward_call(*args, **kwargs)
599
+ [default2]:[rank10]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
600
+ [default2]:[rank10]: sharded_logits = self.model(
601
+ [default2]:[rank10]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
602
+ [default2]:[rank10]: return self._call_impl(*args, **kwargs)
603
+ [default2]:[rank10]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
604
+ [default2]:[rank10]: return forward_call(*args, **kwargs)
605
+ [default2]:[rank10]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
606
+ [default2]:[rank10]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
607
+ [default2]:[rank10]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
608
+ [default2]:[rank10]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
609
+ [default2]:[rank10]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
610
+ [default2]:[rank10]: return self._call_impl(*args, **kwargs)
611
+ [default2]:[rank10]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
612
+ [default2]:[rank10]: return forward_call(*args, **kwargs)
613
+ [default2]:[rank10]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
614
+ [default2]:[rank10]: output = self.pp_block(**new_kwargs)
615
+ [default2]:[rank10]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
616
+ [default2]:[rank10]: return self._call_impl(*args, **kwargs)
617
+ [default2]:[rank10]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
618
+ [default2]:[rank10]: return forward_call(*args, **kwargs)
619
+ [default2]:[rank10]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 631, in forward
620
+ [default2]:[rank10]: output = self.attn(hidden_states=hidden_states, sequence_mask=sequence_mask)
621
+ [default2]:[rank10]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
622
+ [default2]:[rank10]: return self._call_impl(*args, **kwargs)
623
+ [default2]:[rank10]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
624
+ [default2]:[rank10]: return forward_call(*args, **kwargs)
625
+ [default2]:[rank10]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 566, in forward
626
+ [default2]:[rank10]: query_states, key_value_states = self.flash_rotary_embedding(query_states, kv=key_value_states)
627
+ [default2]:[rank10]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
628
+ [default2]:[rank10]: return self._call_impl(*args, **kwargs)
629
+ [default2]:[rank10]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
630
+ [default2]:[rank10]: return forward_call(*args, **kwargs)
631
+ [default2]:[rank10]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/layers/rotary.py", line 457, in forward
632
+ [default2]:[rank10]: q = apply_rotary_emb_func(
633
+ [default2]:[rank10]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/layers/rotary.py", line 122, in apply_rotary_emb
634
+ [default2]:[rank10]: return ApplyRotaryEmb.apply(
635
+ [default2]:[rank10]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/function.py", line 598, in apply
636
+ [default2]:[rank10]: return super().apply(*args, **kwargs) # type: ignore[misc]
637
+ [default2]:[rank10]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/layers/rotary.py", line 48, in forward
638
+ [default2]:[rank10]: out = apply_rotary(
639
+ [default2]:[rank10]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/rotary.py", line 202, in apply_rotary
640
+ [default2]:[rank10]: rotary_kernel[grid](
641
+ [default2]:[rank10]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 167, in <lambda>
642
+ [default2]:[rank10]: return lambda *args, **kwargs: self.run(grid=grid, warmup=False, *args, **kwargs)
643
+ [default2]:[rank10]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 416, in run
644
+ [default2]:[rank10]: self.cache[device][key] = compile(
645
+ [default2]:[rank10]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/compiler/compiler.py", line 194, in compile
646
+ [default2]:[rank10]: metadata_group[f"{src.name}.{ext}"] = fn_cache_manager.put(next_module, f"{src.name}.{ext}")
647
+ [default2]:[rank10]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/cache.py", line 123, in put
648
+ [default2]:[rank10]: with open(temp_path, mode) as f:
649
+ [default2]:[rank10]: OSError: [Errno 122] Disk quota exceeded
650
+ W0702 16:33:28.970000 139765262067520 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 892154 closing signal SIGTERM
651
+ W0702 16:33:28.975000 139765262067520 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 892156 closing signal SIGTERM
652
+ W0702 16:33:28.978000 139765262067520 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 892157 closing signal SIGTERM
653
+ W0702 16:33:28.983000 139765262067520 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 892159 closing signal SIGTERM
654
+ W0702 16:33:29.007000 139765262067520 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 892160 closing signal SIGTERM
655
+ W0702 16:33:29.049000 139765262067520 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 892161 closing signal SIGTERM
656
+ E0702 16:33:31.253000 139765262067520 torch/distributed/elastic/multiprocessing/api.py:826] failed (exitcode: 1) local_rank: 1 (pid: 892155) of binary: /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10
657
+ Traceback (most recent call last):
658
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/torchrun", line 8, in <module>
659
+ sys.exit(main())
660
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 347, in wrapper
661
+ return f(*args, **kwargs)
662
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/run.py", line 879, in main
663
+ run(args)
664
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/run.py", line 870, in run
665
+ elastic_launch(
666
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 132, in __call__
667
+ return launch_agent(self._config, self._entrypoint, list(args))
668
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 263, in launch_agent
669
+ raise ChildFailedError(
670
+ torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
671
+ ============================================================
672
+ /fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py FAILED
673
+ ------------------------------------------------------------
674
+ Failures:
675
+ [1]:
676
+ time : 2024-07-02_16:33:28
677
+ host : ip-26-0-163-43.ec2.internal
678
+ rank : 4 (local_rank: 4)
679
+ exitcode : 1 (pid: 892158)
680
+ error_file: <N/A>
681
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
682
+ ------------------------------------------------------------
683
+ Root Cause (first observed failure):
684
+ [0]:
685
+ time : 2024-07-02_16:33:28
686
+ host : ip-26-0-163-43.ec2.internal
687
+ rank : 1 (local_rank: 1)
688
+ exitcode : 1 (pid: 892155)
689
+ error_file: <N/A>
690
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
691
+ ============================================================
692
+ srun: error: ip-26-0-163-43: task 0: Exited with exit code 1
693
+ W0702 16:33:33.002000 140342280533760 torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1252] The node 'ip-26-0-169-207.ec2.internal_2461009_0' has failed to send a keep-alive heartbeat to the rendezvous 'none' due to an error of type RendezvousConnectionError.
694
+ W0702 16:33:33.979000 140347947353920 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 2461079 closing signal SIGTERM
695
+ W0702 16:33:33.984000 140347947353920 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 2461080 closing signal SIGTERM
696
+ W0702 16:33:33.986000 140347947353920 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 2461082 closing signal SIGTERM
697
+ W0702 16:33:33.991000 140347947353920 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 2461085 closing signal SIGTERM
698
+ E0702 16:33:35.901000 140347947353920 torch/distributed/elastic/multiprocessing/api.py:826] failed (exitcode: 1) local_rank: 2 (pid: 2461081) of binary: /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10
699
+ W0702 16:33:35.907000 140347947353920 torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1203] The node 'ip-26-0-169-207.ec2.internal_2461009_0' has failed to shutdown the rendezvous 'none' due to an error of type RendezvousConnectionError.
700
+ W0702 16:33:35.933000 140347947353920 torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1203] The node 'ip-26-0-169-207.ec2.internal_2461009_0' has failed to shutdown the rendezvous 'none' due to an error of type RendezvousConnectionError.
701
+ W0702 16:33:35.951000 140347947353920 torch/distributed/elastic/rendezvous/dynamic_rendezvous.py:1203] The node 'ip-26-0-169-207.ec2.internal_2461009_0' has failed to shutdown the rendezvous 'none' due to an error of type RendezvousConnectionError.
702
+ Traceback (most recent call last):
703
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/torchrun", line 8, in <module>
704
+ sys.exit(main())
705
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 347, in wrapper
706
+ return f(*args, **kwargs)
707
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/run.py", line 879, in main
708
+ run(args)
709
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/run.py", line 870, in run
710
+ elastic_launch(
711
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 132, in __call__
712
+ return launch_agent(self._config, self._entrypoint, list(args))
713
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 263, in launch_agent
714
+ raise ChildFailedError(
715
+ torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
716
+ ============================================================
717
+ /fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py FAILED
718
+ ------------------------------------------------------------
719
+ Failures:
720
+ [1]:
721
+ time : 2024-07-02_16:33:33
722
+ host : ip-26-0-169-207.ec2.internal
723
+ rank : 12 (local_rank: 4)
724
+ exitcode : 1 (pid: 2461083)
725
+ error_file: <N/A>
726
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
727
+ [2]:
728
+ time : 2024-07-02_16:33:33
729
+ host : ip-26-0-169-207.ec2.internal
730
+ rank : 13 (local_rank: 5)
731
+ exitcode : 1 (pid: 2461084)
732
+ error_file: <N/A>
733
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
734
+ [3]:
735
+ time : 2024-07-02_16:33:33
736
+ host : ip-26-0-169-207.ec2.internal
737
+ rank : 15 (local_rank: 7)
738
+ exitcode : 1 (pid: 2461086)
739
+ error_file: <N/A>
740
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
741
+ ------------------------------------------------------------
742
+ Root Cause (first observed failure):
743
+ [0]:
744
+ time : 2024-07-02_16:33:33
745
+ host : ip-26-0-169-207.ec2.internal
746
+ rank : 10 (local_rank: 2)
747
+ exitcode : 1 (pid: 2461081)
748
+ error_file: <N/A>
749
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
750
+ ============================================================
751
+ srun: error: ip-26-0-169-207: task 1: Exited with exit code 1
752
+ Consider using `hf_transfer` for faster uploads. This solution comes with some limitations. See https://huggingface.co/docs/huggingface_hub/hf_transfer for more details.
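The warning above is emitted by huggingface_hub when results are pushed back to the Hub without the accelerated transfer backend. A minimal sketch of enabling it, using the standard huggingface_hub package and environment variable rather than anything taken from this run's environment:

# Install the optional Rust-based transfer backend and opt in via the
# environment variable that huggingface_hub checks before uploads/downloads.
pip install hf_transfer
export HF_HUB_ENABLE_HF_TRANSFER=1

This only speeds up large file transfers; it has no bearing on the training failure recorded in the log above.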
llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-8/status.txt ADDED
@@ -0,0 +1 @@
1
+ fail
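Taken together, the log and the "fail" status show the run died because every rank raised OSError: [Errno 122] Disk quota exceeded inside triton/runtime/cache.py while the flash-attn Triton layer-norm and rotary kernels were being compiled: the JIT kernel cache could not be written, not a training bug. A minimal sketch of one possible mitigation, assuming a quota-free scratch path (the exact path below is an illustration, not taken from this job), is to point Triton's cache somewhere with free space before launching torchrun:

# Triton's file cache manager honors TRITON_CACHE_DIR when writing compiled kernels;
# /scratch/$USER/triton-cache is an assumed example location, not from this job.
export TRITON_CACHE_DIR=/scratch/$USER/triton-cache
mkdir -p "$TRITON_CACHE_DIR"

Freeing space or raising the quota on the original cache location would work just as well; either way the Triton kernels can then compile and the benchmark can be re-run.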