3outeille (HF staff) committed
Commit c35eb60 · verified · 1 Parent(s): 4ea5c2d

Upload llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-4
llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-4/bench.slurm ADDED
@@ -0,0 +1,111 @@
+ #!/bin/bash
+
+ #SBATCH --job-name=bench_cluster
+ #SBATCH --time=00:59:00
+ #SBATCH --partition=hopper-prod
+ #SBATCH --nodes=2
+ #SBATCH --gres=gpu:8
+ #SBATCH --qos=high
+ #SBATCH --ntasks-per-node=1
+ #SBATCH --cpus-per-task=96
+ #SBATCH --exclusive
+ #SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-4/log.out
+ #SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-4/log.out
+
+ # Function to update status based on squeue output
+ update_status() {
+     job_id=$1
+     status_file=$2
+     # For unknown reasons, this doesn't update the status for pending jobs; it only works for running ones
+     while true; do
+         job_status=$(squeue --job $job_id --noheader --format=%T)
+         echo "Job status: $job_status"
+         if [ -z "$job_status" ]; then
+             # Job has finished or is not found
+             break
+         elif [ "$job_status" = "RUNNING" ]; then
+             printf "running" > $status_file
+             break
+         fi
+         sleep 10
+     done
+ }
+
+ # Misc initializations.
+ echo "========================"
+ echo "START TIME: $(date)"
+ source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
+ conda activate /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster
+ echo python3 version = $(python3 --version)
+ echo "========================"
+
+ # Slurm stuff
+ export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
+ export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
+ export MASTER_PORT=$((1024 + RANDOM % 64511))
+
+ export TMPDIR=/scratch
+ export HF_DATASETS_CACHE="/admin/home/ferdinand_mom/.cache"
+ export CUBLAS_WORKSPACE_CONFIG=":4096:8"
+ export CUDA_DEVICE_MAX_CONNECTIONS="1"
+
+ huggingface-cli login --token $HUGGINGFACE_TOKEN
+
+
+ NANOTRON_REPO="/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron"
+ CMD="$NANOTRON_REPO/run_train.py --config-file /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-4/config.yaml"
+
+ LAUNCHER="torchrun \
+     --nproc_per_node 8 \
+     --nnodes 2 \
+     --rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} \
+     --rdzv_backend c10d \
+     --max_restarts 0 \
+     --tee 3 \
+     --node_rank ${SLURM_PROCID}"
+
+ # Checkout the bench_cluster branch
+ cd $NANOTRON_REPO
+ git checkout bench_cluster
+ cd ..
+ # Get the current job ID
+ job_id=${SLURM_JOB_ID}
+
+ # Update status to "pending" or "running" in the background
+ update_status $job_id /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-4/status.txt &
+
+ # Run the main command
+ srun -u $LAUNCHER $CMD
+ exit_status=$?
+
+ # Update status based on the exit status of `srun`
+ if [ $exit_status -eq 0 ]; then
+     printf "completed" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-4/status.txt
+ else
+     if grep -q "OutOfMemoryError" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-4/log.out; then
+         printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-4/status.txt
+     elif grep -q " CUDA error: an illegal memory access" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-4/log.out; then
+         printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-4/status.txt
+     elif grep -q "Timeout at NCCL" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-4/log.out; then
+         printf "timeout" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-4/status.txt
+     else
+         printf "fail" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-4/status.txt
+     fi
+ fi
+
+ # Run the report script if the job completed successfully
+ if [ $exit_status -eq 0 ]; then
+     python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-4 --is_logs
+     python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-4 --is_profiler
+ fi
+
+
+ # Push to hub the folder using huggingface_cli
+ huggingface-cli upload nanotron/bench_cluster /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-4 llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-4 --commit-message "Upload llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-4"
+
+ # Verify the upload
+ if [ $? -eq 0 ]; then
+     echo "Uploading to Huggingface Hub successful"
+ else
+     echo "Failed to upload to Huggingface Hub"
+ fi
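For context, a brief sketch of how a script like this is typically driven: it is submitted with sbatch, the background update_status call records the job state in status.txt, and the grep-based block at the end classifies failures from log.out. The commands below are an illustrative assumption and not part of the uploaded files; only the results directory path is taken from the script, and the location of bench.slurm inside it is hypothetical.

    # Hedged usage sketch (not part of the uploaded files)
    RESULTS_DIR=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-4
    job_id=$(sbatch --parsable "$RESULTS_DIR/bench.slurm")   # --parsable prints only the job id
    echo "Submitted job $job_id"
    # status.txt ends up holding one of: running, completed, oom, timeout, fail
    watch -n 30 cat "$RESULTS_DIR/status.txt"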
llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-4/config.yaml ADDED
@@ -0,0 +1,90 @@
+ general:
+   project: bench_cluster
+   seed: 42
+ model:
+   ddp_bucket_cap_mb: 25
+   dtype: bfloat16
+   init_method:
+     std: 0.025
+   make_vocab_size_divisible_by: 1
+   model_config:
+     bos_token_id: 1
+     eos_token_id: 2
+     hidden_act: silu
+     hidden_size: 2048
+     initializer_range: 0.02
+     intermediate_size: 4096
+     is_llama_config: true
+     max_position_embeddings: 4096
+     num_attention_heads: 32
+     num_hidden_layers: 24
+     num_key_value_heads: 32
+     pad_token_id: null
+     pretraining_tp: 1
+     rms_norm_eps: 1.0e-05
+     rope_scaling: null
+     rope_theta: 10000.0
+     tie_word_embeddings: true
+     use_cache: true
+     vocab_size: 50257
+ optimizer:
+   accumulate_grad_in_fp32: true
+   clip_grad: 1.0
+   learning_rate_scheduler:
+     learning_rate: 0.0001
+     lr_decay_style: linear
+     lr_warmup_style: linear
+     lr_warmup_steps: 1
+     min_decay_lr: 1.0e-05
+   optimizer_factory:
+     adam_beta1: 0.9
+     adam_beta2: 0.95
+     adam_eps: 1.0e-08
+     name: adamW
+     torch_adam_is_fused: true
+   weight_decay: 0.01
+   zero_stage: 1
+ parallelism:
+   dp: 4
+   expert_parallel_size: 1
+   pp: 1
+   pp_engine: 1f1b
+   tp: 4
+   tp_linear_async_communication: false
+   tp_mode: REDUCE_SCATTER
+ profiler:
+   profiler_export_path: /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-4
+ tokenizer:
+   tokenizer_max_length: null
+   tokenizer_name_or_path: openai-community/gpt2
+   tokenizer_revision: null
+ data_stages:
+ - name: Training Stage
+   start_training_step: 1
+   data:
+     dataset:
+       dataset_overwrite_cache: false
+       dataset_processing_num_proc_per_process: 64
+       hf_dataset_config_name: null
+       hf_dataset_or_datasets: roneneldan/TinyStories
+       hf_dataset_splits: train
+       text_column_name: text
+     num_loading_workers: 32
+     seed: 42
+ lighteval: null
+ tokens:
+   train_steps: 20
+   val_check_interval: -1
+   batch_accumulation_per_replica: 64
+   limit_test_batches: 0
+   limit_val_batches: 0
+   micro_batch_size: 4
+   sequence_length: 4096
+ logging:
+   iteration_step_info_interval: 1
+   log_level: info
+   log_level_replica: info
+ checkpoints:
+   checkpoint_interval: 100000
+   checkpoints_path: /dev/null
+   resume_checkpoint_path: null
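The parallelism and tokens sections above determine the effective batch that the training log below reports. A minimal sketch of the arithmetic, with variable names chosen here purely for illustration:

    # Global batch size = micro_batch_size x batch_accumulation_per_replica x dp
    micro_batch_size=4
    batch_accumulation_per_replica=64
    dp=4
    sequence_length=4096
    global_batch_size=$((micro_batch_size * batch_accumulation_per_replica * dp))   # 1024 samples per step
    tokens_per_step=$((global_batch_size * sequence_length))                        # 4194304 tokens per step
    echo "global_batch_size=${global_batch_size} tokens_per_step=${tokens_per_step}"

The 1024-sample global batch matches the "global_batch_size: 1024" line in the log below.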
llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-4/log.out ADDED
@@ -0,0 +1,825 @@
+ ========================
+ START TIME: Tue Jul 2 16:22:58 UTC 2024
+ python3 version = Python 3.10.14
+ ========================
+ The token has not been saved to the git credentials helper. Pass `add_to_git_credential=True` in this function directly or `--add-to-git-credential` if using via `huggingface-cli` if you want to set the git credential as well.
+ Token is valid (permission: write).
+ OSError: [Errno 122] Disk quota exceeded
+
+ During handling of the above exception, another exception occurred:
+
+ Traceback (most recent call last):
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/huggingface-cli", line 8, in <module>
+ sys.exit(main())
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/huggingface_hub/commands/huggingface_cli.py", line 51, in main
+ service.run()
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/huggingface_hub/commands/user.py", line 98, in run
+ login(token=self.args.token, add_to_git_credential=self.args.add_to_git_credential)
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/huggingface_hub/_login.py", line 111, in login
+ _login(token, add_to_git_credential=add_to_git_credential, write_permission=write_permission)
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/huggingface_hub/_login.py", line 328, in _login
+ path.write_text(token)
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/pathlib.py", line 1154, in write_text
+ with self.open(mode='w', encoding=encoding, errors=errors, newline=newline) as f:
+ OSError: [Errno 122] Disk quota exceeded
+ Already on 'bench_cluster'
+ M examples/config_tiny_llama.py
+ M examples/config_tiny_llama.yaml
+ M examples/train_tiny_llama.sh
+ M src/nanotron/models/llama.py
+ M src/nanotron/trainer.py
+ Your branch is up to date with 'origin/bench_cluster'.
+ Job status: RUNNING
+ W0702 16:23:00.555000 140654571476800 torch/distributed/run.py:757]
+ W0702 16:23:00.555000 140654571476800 torch/distributed/run.py:757] *****************************************
+ W0702 16:23:00.555000 140654571476800 torch/distributed/run.py:757] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
+ W0702 16:23:00.555000 140654571476800 torch/distributed/run.py:757] *****************************************
+ W0702 16:23:00.578000 140166684469056 torch/distributed/run.py:757]
+ W0702 16:23:00.578000 140166684469056 torch/distributed/run.py:757] *****************************************
+ W0702 16:23:00.578000 140166684469056 torch/distributed/run.py:757] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
+ W0702 16:23:00.578000 140166684469056 torch/distributed/run.py:757] *****************************************
+ [default0]:07/02/2024 16:23:18 [WARNING|DP=0|PP=0|TP=0|ip-26-0-160-225]: [Vocab Size Padding] Padded vocab (size: 50257) with 3 dummy tokens (new size: 50260)
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Config:
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Config(general=GeneralArgs(project='bench_cluster',
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: run='%date_%jobid',
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: seed=42,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: step=None,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: consumed_train_samples=None,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: benchmark_csv_path=None,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: ignore_sanity_checks=True),
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: parallelism=ParallelismArgs(dp=4,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: pp=1,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: tp=4,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: pp_engine=<nanotron.parallel.pipeline_parallel.engine.OneForwardOneBackwardPipelineEngine object at 0x7fd02accc910>,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: tp_mode=<TensorParallelLinearMode.REDUCE_SCATTER: 2>,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: tp_linear_async_communication=False,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: expert_parallel_size=1),
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: model=ModelArgs(model_config=LlamaConfig(bos_token_id=1,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: eos_token_id=2,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: hidden_act='silu',
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: hidden_size=2048,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: initializer_range=0.02,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: intermediate_size=4096,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: is_llama_config=True,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: max_position_embeddings=4096,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: num_attention_heads=32,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: num_hidden_layers=24,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: num_key_value_heads=32,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: pad_token_id=None,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: pretraining_tp=1,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: rms_norm_eps=1e-05,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: rope_scaling=None,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: rope_theta=10000.0,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: tie_word_embeddings=True,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: use_cache=True,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: vocab_size=50260),
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: init_method=RandomInit(std=0.025),
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: dtype=torch.bfloat16,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: make_vocab_size_divisible_by=1,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: ddp_bucket_cap_mb=25),
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: tokenizer=TokenizerArgs(tokenizer_name_or_path='openai-community/gpt2',
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: tokenizer_revision=None,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: tokenizer_max_length=None),
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: checkpoints=CheckpointsArgs(checkpoints_path=Path('/dev/null'),
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: checkpoint_interval=100000,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: save_initial_state=False,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: resume_checkpoint_path=None,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: checkpoints_path_is_shared_file_system=False),
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: logging=LoggingArgs(log_level='info',
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: log_level_replica='info',
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: iteration_step_info_interval=1),
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: tokens=TokensArgs(sequence_length=4096,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: train_steps=20,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: micro_batch_size=4,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: batch_accumulation_per_replica=64,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: val_check_interval=-1,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: limit_val_batches=0,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: limit_test_batches=0),
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: optimizer=OptimizerArgs(optimizer_factory=AdamWOptimizerArgs(adam_eps=1e-08,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: adam_beta1=0.9,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: adam_beta2=0.95,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: torch_adam_is_fused=True,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: name='adamW'),
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: zero_stage=1,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: weight_decay=0.01,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: clip_grad=1.0,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: accumulate_grad_in_fp32=True,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: learning_rate_scheduler=LRSchedulerArgs(learning_rate=0.0001,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: lr_warmup_steps=1,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: lr_warmup_style='linear',
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: lr_decay_style='linear',
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: lr_decay_steps=19,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: lr_decay_starting_step=None,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: min_decay_lr=1e-05)),
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: data_stages=[DatasetStageArgs(name='Training Stage',
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: start_training_step=1,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: data=DataArgs(dataset=PretrainDatasetsArgs(hf_dataset_or_datasets='roneneldan/TinyStories',
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: hf_dataset_splits='train',
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: hf_dataset_config_name=None,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: dataset_processing_num_proc_per_process=64,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: dataset_overwrite_cache=False,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: text_column_name='text'),
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: seed=42,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: num_loading_workers=32))],
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: profiler=ProfilerArgs(profiler_export_path=Path('/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-4')),
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: lighteval=None)
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Model Config:
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: LlamaConfig(bos_token_id=1,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: eos_token_id=2,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: hidden_act='silu',
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: hidden_size=2048,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: initializer_range=0.02,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: intermediate_size=4096,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: is_llama_config=True,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: max_position_embeddings=4096,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: num_attention_heads=32,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: num_hidden_layers=24,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: num_key_value_heads=32,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: pad_token_id=None,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: pretraining_tp=1,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: rms_norm_eps=1e-05,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: rope_scaling=None,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: rope_theta=10000.0,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: tie_word_embeddings=True,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: use_cache=True,
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: vocab_size=50260)
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Building model..
+ [default0]:07/02/2024 16:23:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Setting PP block ranks...
+ [default0]:07/02/2024 16:23:31 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Total number of parameters: 1.11G (2117.09MiB)
+ [default0]:07/02/2024 16:23:31 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Local number of parameters: 277M (529.27MiB)
+ [default0]:07/02/2024 16:23:31 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [After model building] Memory usage: 554.21MiB. Peak allocated: 606.24MiB Peak reserved: 608.00MiB
+ [default0]:07/02/2024 16:23:31 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: No checkpoint path provided.
+ [default0]:07/02/2024 16:23:31 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Parametrizing model parameters using StandardParametrizator
+ [default3]:07/02/2024 16:23:31 [INFO|DP=0|PP=0|TP=3|ip-26-0-160-225]: Local number of parameters: 277M (529.27MiB)
+ [default3]:07/02/2024 16:23:31 [INFO|DP=0|PP=0|TP=3|ip-26-0-160-225]: [After model building] Memory usage: 554.21MiB. Peak allocated: 606.24MiB Peak reserved: 608.00MiB
+ [default3]:07/02/2024 16:23:31 [INFO|DP=0|PP=0|TP=3|ip-26-0-160-225]: No checkpoint path provided.
+ [default1]:07/02/2024 16:23:31 [INFO|DP=0|PP=0|TP=1|ip-26-0-160-225]: Local number of parameters: 277M (529.27MiB)
+ [default1]:07/02/2024 16:23:31 [INFO|DP=0|PP=0|TP=1|ip-26-0-160-225]: [After model building] Memory usage: 554.21MiB. Peak allocated: 606.24MiB Peak reserved: 608.00MiB
+ [default1]:07/02/2024 16:23:31 [INFO|DP=0|PP=0|TP=1|ip-26-0-160-225]: No checkpoint path provided.
+ [default2]:07/02/2024 16:23:31 [INFO|DP=0|PP=0|TP=2|ip-26-0-160-225]: Local number of parameters: 277M (529.27MiB)
+ [default2]:07/02/2024 16:23:31 [INFO|DP=0|PP=0|TP=2|ip-26-0-160-225]: [After model building] Memory usage: 554.21MiB. Peak allocated: 606.24MiB Peak reserved: 608.00MiB
+ [default2]:07/02/2024 16:23:31 [INFO|DP=0|PP=0|TP=2|ip-26-0-160-225]: No checkpoint path provided.
+ [default5]:07/02/2024 16:23:32 [INFO|DP=1|PP=0|TP=1|ip-26-0-160-225]: No checkpoint path provided.
+ [default6]:07/02/2024 16:23:32 [INFO|DP=1|PP=0|TP=2|ip-26-0-160-225]: No checkpoint path provided.
+ [default4]:07/02/2024 16:23:32 [INFO|DP=1|PP=0|TP=0|ip-26-0-160-225]: No checkpoint path provided.
+ [default7]:07/02/2024 16:23:32 [INFO|DP=1|PP=0|TP=3|ip-26-0-160-225]: No checkpoint path provided.
+ [default0]:07/02/2024 16:23:32 [INFO|DP=2|PP=0|TP=0|ip-26-0-171-56]: No checkpoint path provided.
+ [default1]:07/02/2024 16:23:32 [INFO|DP=2|PP=0|TP=1|ip-26-0-171-56]: No checkpoint path provided.
+ [default2]:07/02/2024 16:23:32 [INFO|DP=2|PP=0|TP=2|ip-26-0-171-56]: No checkpoint path provided.
+ [default3]:07/02/2024 16:23:32 [INFO|DP=2|PP=0|TP=3|ip-26-0-171-56]: No checkpoint path provided.
+ [default6]:07/02/2024 16:23:32 [INFO|DP=3|PP=0|TP=2|ip-26-0-171-56]: No checkpoint path provided.
+ [default5]:07/02/2024 16:23:32 [INFO|DP=3|PP=0|TP=1|ip-26-0-171-56]: No checkpoint path provided.
+ [default4]:07/02/2024 16:23:32 [INFO|DP=3|PP=0|TP=0|ip-26-0-171-56]: No checkpoint path provided.
+ [default7]:07/02/2024 16:23:32 [INFO|DP=3|PP=0|TP=3|ip-26-0-171-56]: No checkpoint path provided.
+ [default0]:07/02/2024 16:23:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [Optimizer Building] Using LearningRateForSP as learning rate
+ [default0]:07/02/2024 16:23:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [ZeRO sharding] Size of optimizer params per rank:
+ [default0]:07/02/2024 16:23:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [ZeRO sharding] DP Rank 0 has 69.4M out of 277M (25.00%) params' optimizer states
+ [default0]:07/02/2024 16:23:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [ZeRO sharding] DP Rank 1 has 69.4M out of 277M (25.00%) params' optimizer states
+ [default0]:07/02/2024 16:23:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [ZeRO sharding] DP Rank 2 has 69.4M out of 277M (25.00%) params' optimizer states
+ [default0]:07/02/2024 16:23:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [ZeRO sharding] DP Rank 3 has 69.4M out of 277M (25.00%) params' optimizer states
+ [default0]:07/02/2024 16:23:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [Training Plan] Stage Training Stage has 19 remaining training steps and has consumed 0 samples
+ [default0]:07/02/2024 16:23:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Using `datasets` library
+ [default0]:07/02/2024 16:23:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Loading tokenizer from openai-community/gpt2 and transformers/hf_hub versions ('4.41.2', '0.23.4')
+ [default0]:07/02/2024 16:23:36 [WARNING|DP=0|PP=0|TP=0|ip-26-0-160-225]: Repo card metadata block was not found. Setting CardData to empty.
+ [default0]:Repo card metadata block was not found. Setting CardData to empty.
+ [default0]:07/02/2024 16:23:37 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [Training Plan] There are 1 training stages
+ [default0]:07/02/2024 16:23:37 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [Stage Training Stage] start from step 1
+ [default0]:07/02/2024 16:23:37 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]:
+ [default0]:07/02/2024 16:23:37 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [Start training] datetime: 2024-07-02 16:23:37.289018 | mbs: 4 | grad_accum: 64 | global_batch_size: 1024 | sequence_length: 4096 | train_steps: 20 | start_iteration_step: 0 | consumed_train_samples: 0
+ [default0]:07/02/2024 16:23:37 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Resuming training from stage Training Stage, it has trained for 0 samples and has 19 remaining train steps
+ [default0]:07/02/2024 16:23:37 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Memory usage: 1877.40MiB. Peak allocated 1877.40MiB. Peak reserved: 1934.00MiB
+ [default6]:07/02/2024 16:23:37 [WARNING|DP=3|PP=0|TP=2|ip-26-0-171-56]: Repo card metadata block was not found. Setting CardData to empty.
+ [default0]:07/02/2024 16:23:37 [WARNING|DP=2|PP=0|TP=0|ip-26-0-171-56]: Repo card metadata block was not found. Setting CardData to empty.
+ [default5]:07/02/2024 16:23:37 [WARNING|DP=3|PP=0|TP=1|ip-26-0-171-56]: Repo card metadata block was not found. Setting CardData to empty.
+ [default1]:07/02/2024 16:23:37 [WARNING|DP=2|PP=0|TP=1|ip-26-0-171-56]: Repo card metadata block was not found. Setting CardData to empty.
+ [default4]:Repo card metadata block was not found. Setting CardData to empty.
+ [default4]:07/02/2024 16:23:37 [WARNING|DP=3|PP=0|TP=0|ip-26-0-171-56]: Repo card metadata block was not found. Setting CardData to empty.
+ [default3]:Repo card metadata block was not found. Setting CardData to empty.
+ [default2]:07/02/2024 16:23:37 [WARNING|DP=2|PP=0|TP=2|ip-26-0-171-56]: Repo card metadata block was not found. Setting CardData to empty.
+ [default5]:Repo card metadata block was not found. Setting CardData to empty.
+ [default3]:07/02/2024 16:23:37 [WARNING|DP=2|PP=0|TP=3|ip-26-0-171-56]: Repo card metadata block was not found. Setting CardData to empty.
+ [default0]:Repo card metadata block was not found. Setting CardData to empty.
+ [default6]:Repo card metadata block was not found. Setting CardData to empty.
+ [default1]:Repo card metadata block was not found. Setting CardData to empty.
+ [default2]:Repo card metadata block was not found. Setting CardData to empty.
+ [default5]:07/02/2024 16:23:37 [WARNING|DP=1|PP=0|TP=1|ip-26-0-160-225]: Repo card metadata block was not found. Setting CardData to empty.
+ [default6]:07/02/2024 16:23:37 [WARNING|DP=1|PP=0|TP=2|ip-26-0-160-225]: Repo card metadata block was not found. Setting CardData to empty.
+ [default3]:07/02/2024 16:23:37 [WARNING|DP=0|PP=0|TP=3|ip-26-0-160-225]: Repo card metadata block was not found. Setting CardData to empty.
+ [default1]:07/02/2024 16:23:37 [WARNING|DP=0|PP=0|TP=1|ip-26-0-160-225]: Repo card metadata block was not found. Setting CardData to empty.
+ [default7]:07/02/2024 16:23:37 [WARNING|DP=1|PP=0|TP=3|ip-26-0-160-225]: Repo card metadata block was not found. Setting CardData to empty.
+ [default7]:Repo card metadata block was not found. Setting CardData to empty.
+ [default4]:07/02/2024 16:23:37 [WARNING|DP=1|PP=0|TP=0|ip-26-0-160-225]: Repo card metadata block was not found. Setting CardData to empty.
+ [default5]:Repo card metadata block was not found. Setting CardData to empty.
+ [default6]:Repo card metadata block was not found. Setting CardData to empty.
+ [default2]:Repo card metadata block was not found. Setting CardData to empty.
+ [default2]:07/02/2024 16:23:37 [WARNING|DP=0|PP=0|TP=2|ip-26-0-160-225]: Repo card metadata block was not found. Setting CardData to empty.
+ [default1]:Repo card metadata block was not found. Setting CardData to empty.
+ [default4]:Repo card metadata block was not found. Setting CardData to empty.
+ [default3]:Repo card metadata block was not found. Setting CardData to empty.
+ [default7]:Repo card metadata block was not found. Setting CardData to empty.
+ [default7]:07/02/2024 16:23:37 [WARNING|DP=3|PP=0|TP=3|ip-26-0-171-56]: Repo card metadata block was not found. Setting CardData to empty.
+ [default0]:[rank8]: OSError: [Errno 122] Disk quota exceeded
+ [default0]:
+ [default0]:[rank8]: During handling of the above exception, another exception occurred:
+ [default0]:
+ [default0]:[rank8]: Traceback (most recent call last):
+ [default0]:[rank8]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
+ [default0]:[rank8]: trainer.train(dataloader)
+ [default0]:[rank8]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
+ [default0]:[rank8]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
+ [default0]:[rank8]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
+ [default0]:[rank8]: outputs = self.pipeline_engine.train_batch_iter(
+ [default0]:[rank8]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
+ [default0]:[rank8]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
+ [default0]:[rank8]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
+ [default0]:[rank8]: output = model(**micro_batch)
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default0]:[rank8]: return self._call_impl(*args, **kwargs)
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default0]:[rank8]: return forward_call(*args, **kwargs)
+ [default0]:[rank8]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
+ [default0]:[rank8]: sharded_logits = self.model(
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default0]:[rank8]: return self._call_impl(*args, **kwargs)
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default0]:[rank8]: return forward_call(*args, **kwargs)
+ [default0]:[rank8]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
+ [default0]:[rank8]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
+ [default0]:[rank8]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
+ [default0]:[rank8]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default0]:[rank8]: return self._call_impl(*args, **kwargs)
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default0]:[rank8]: return forward_call(*args, **kwargs)
+ [default0]:[rank8]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
+ [default0]:[rank8]: output = self.pp_block(**new_kwargs)
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default0]:[rank8]: return self._call_impl(*args, **kwargs)
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default0]:[rank8]: return forward_call(*args, **kwargs)
+ [default0]:[rank8]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 629, in forward
+ [default0]:[rank8]: hidden_states = self.input_layernorm(hidden_states)
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default0]:[rank8]: return self._call_impl(*args, **kwargs)
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default0]:[rank8]: return forward_call(*args, **kwargs)
+ [default0]:[rank8]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/nn/layer_norm.py", line 42, in forward
+ [default0]:[rank8]: return layer_norm_fn(
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 875, in layer_norm_fn
+ [default0]:[rank8]: return LayerNormFn.apply(
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/function.py", line 598, in apply
+ [default0]:[rank8]: return super().apply(*args, **kwargs) # type: ignore[misc]
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 748, in forward
+ [default0]:[rank8]: y, y1, mean, rstd, residual_out, seeds, dropout_mask, dropout_mask1 = _layer_norm_fwd(
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 335, in _layer_norm_fwd
+ [default0]:[rank8]: _layer_norm_fwd_1pass_kernel[(M,)](
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 167, in <lambda>
+ [default0]:[rank8]: return lambda *args, **kwargs: self.run(grid=grid, warmup=False, *args, **kwargs)
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in run
+ [default0]:[rank8]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in <dictcomp>
+ [default0]:[rank8]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 122, in _bench
+ [default0]:[rank8]: return do_bench(kernel_call, warmup=self.warmup, rep=self.rep, quantiles=(0.5, 0.2, 0.8))
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/testing.py", line 102, in do_bench
+ [default0]:[rank8]: fn()
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 110, in kernel_call
+ [default0]:[rank8]: self.fn.run(
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
+ [default0]:[rank8]: return self.fn.run(*args, **kwargs)
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
+ [default0]:[rank8]: return self.fn.run(*args, **kwargs)
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
+ [default0]:[rank8]: return self.fn.run(*args, **kwargs)
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 416, in run
+ [default0]:[rank8]: self.cache[device][key] = compile(
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/compiler/compiler.py", line 194, in compile
+ [default0]:[rank8]: metadata_group[f"{src.name}.{ext}"] = fn_cache_manager.put(next_module, f"{src.name}.{ext}")
+ [default0]:[rank8]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/cache.py", line 123, in put
+ [default0]:[rank8]: with open(temp_path, mode) as f:
+ [default0]:[rank8]: OSError: [Errno 122] Disk quota exceeded
+ [default5]:[rank13]: OSError: [Errno 122] Disk quota exceeded
+ [default5]:
+ [default5]:[rank13]: During handling of the above exception, another exception occurred:
+ [default5]:
+ [default5]:[rank13]: Traceback (most recent call last):
+ [default5]:[rank13]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
+ [default5]:[rank13]: trainer.train(dataloader)
+ [default5]:[rank13]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
+ [default5]:[rank13]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
+ [default5]:[rank13]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
+ [default5]:[rank13]: outputs = self.pipeline_engine.train_batch_iter(
+ [default5]:[rank13]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
+ [default5]:[rank13]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
+ [default5]:[rank13]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
+ [default5]:[rank13]: output = model(**micro_batch)
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default5]:[rank13]: return self._call_impl(*args, **kwargs)
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default5]:[rank13]: return forward_call(*args, **kwargs)
+ [default5]:[rank13]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
+ [default5]:[rank13]: sharded_logits = self.model(
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default5]:[rank13]: return self._call_impl(*args, **kwargs)
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default5]:[rank13]: return forward_call(*args, **kwargs)
+ [default5]:[rank13]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
+ [default5]:[rank13]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
+ [default5]:[rank13]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
+ [default5]:[rank13]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default5]:[rank13]: return self._call_impl(*args, **kwargs)
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default5]:[rank13]: return forward_call(*args, **kwargs)
+ [default5]:[rank13]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
+ [default5]:[rank13]: output = self.pp_block(**new_kwargs)
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default5]:[rank13]: return self._call_impl(*args, **kwargs)
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default5]:[rank13]: return forward_call(*args, **kwargs)
+ [default5]:[rank13]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 629, in forward
+ [default5]:[rank13]: hidden_states = self.input_layernorm(hidden_states)
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default5]:[rank13]: return self._call_impl(*args, **kwargs)
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default5]:[rank13]: return forward_call(*args, **kwargs)
+ [default5]:[rank13]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/nn/layer_norm.py", line 42, in forward
+ [default5]:[rank13]: return layer_norm_fn(
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 875, in layer_norm_fn
+ [default5]:[rank13]: return LayerNormFn.apply(
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/function.py", line 598, in apply
+ [default5]:[rank13]: return super().apply(*args, **kwargs) # type: ignore[misc]
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 748, in forward
+ [default5]:[rank13]: y, y1, mean, rstd, residual_out, seeds, dropout_mask, dropout_mask1 = _layer_norm_fwd(
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 335, in _layer_norm_fwd
+ [default5]:[rank13]: _layer_norm_fwd_1pass_kernel[(M,)](
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 167, in <lambda>
+ [default5]:[rank13]: return lambda *args, **kwargs: self.run(grid=grid, warmup=False, *args, **kwargs)
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in run
+ [default5]:[rank13]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in <dictcomp>
+ [default5]:[rank13]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 122, in _bench
+ [default5]:[rank13]: return do_bench(kernel_call, warmup=self.warmup, rep=self.rep, quantiles=(0.5, 0.2, 0.8))
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/testing.py", line 102, in do_bench
+ [default5]:[rank13]: fn()
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 110, in kernel_call
+ [default5]:[rank13]: self.fn.run(
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
+ [default5]:[rank13]: return self.fn.run(*args, **kwargs)
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
+ [default5]:[rank13]: return self.fn.run(*args, **kwargs)
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
+ [default5]:[rank13]: return self.fn.run(*args, **kwargs)
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 416, in run
+ [default5]:[rank13]: self.cache[device][key] = compile(
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/compiler/compiler.py", line 194, in compile
+ [default5]:[rank13]: metadata_group[f"{src.name}.{ext}"] = fn_cache_manager.put(next_module, f"{src.name}.{ext}")
+ [default5]:[rank13]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/cache.py", line 123, in put
+ [default5]:[rank13]: with open(temp_path, mode) as f:
+ [default5]:[rank13]: OSError: [Errno 122] Disk quota exceeded
+ [default3]:[rank11]: OSError: [Errno 122] Disk quota exceeded
+ [default3]:
+ [default3]:[rank11]: During handling of the above exception, another exception occurred:
+ [default3]:
+ [default3]:[rank11]: Traceback (most recent call last):
+ [default3]:[rank11]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
+ [default3]:[rank11]: trainer.train(dataloader)
+ [default3]:[rank11]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
+ [default3]:[rank11]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
+ [default3]:[rank11]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
+ [default3]:[rank11]: outputs = self.pipeline_engine.train_batch_iter(
+ [default3]:[rank11]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
+ [default3]:[rank11]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
+ [default3]:[rank11]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
+ [default3]:[rank11]: output = model(**micro_batch)
+ [default3]:[rank11]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default3]:[rank11]: return self._call_impl(*args, **kwargs)
+ [default3]:[rank11]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default3]:[rank11]: return forward_call(*args, **kwargs)
+ [default3]:[rank11]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
+ [default3]:[rank11]: sharded_logits = self.model(
+ [default3]:[rank11]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default3]:[rank11]: return self._call_impl(*args, **kwargs)
+ [default3]:[rank11]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default3]:[rank11]: return forward_call(*args, **kwargs)
+ [default3]:[rank11]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
+ [default3]:[rank11]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
+ [default3]:[rank11]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
+ [default3]:[rank11]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
+ [default3]:[rank11]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default3]:[rank11]: return self._call_impl(*args, **kwargs)
+ [default3]:[rank11]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default3]:[rank11]: return forward_call(*args, **kwargs)
+ [default3]:[rank11]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
+ [default3]:[rank11]: output = self.pp_block(**new_kwargs)
+ [default3]:[rank11]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default3]:[rank11]: return self._call_impl(*args, **kwargs)
+ [default3]:[rank11]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default3]:[rank11]: return forward_call(*args, **kwargs)
+ [default3]:[rank11]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 629, in forward
+ [default3]:[rank11]: hidden_states = self.input_layernorm(hidden_states)
+ [default3]:[rank11]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
+ [default3]:[rank11]: return self._call_impl(*args, **kwargs)
+ [default3]:[rank11]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
+ [default3]:[rank11]: return forward_call(*args, **kwargs)
+ [default3]:[rank11]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/nn/layer_norm.py", line 42, in forward
+ [default3]:[rank11]: return layer_norm_fn(
+ [default3]:[rank11]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 875, in layer_norm_fn
+ [default3]:[rank11]: return LayerNormFn.apply(
+ [default3]:[rank11]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/function.py", line 598, in apply
+ [default3]:[rank11]: return super().apply(*args, **kwargs) # type: ignore[misc]
+ [default3]:[rank11]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 748, in forward
+ [default3]:[rank11]: y, y1, mean, rstd, residual_out, seeds, dropout_mask, dropout_mask1 = _layer_norm_fwd(
+ [default3]:[rank11]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 335, in _layer_norm_fwd
+ [default3]:[rank11]: _layer_norm_fwd_1pass_kernel[(M,)](
+ [default3]:[rank11]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 167, in <lambda>
+ [default3]:[rank11]: return lambda *args, **kwargs: self.run(grid=grid, warmup=False, *args, **kwargs)
+ [default3]:[rank11]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in run
+ [default3]:[rank11]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
+ [default3]:[rank11]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in <dictcomp>
+ [default3]:[rank11]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
+ [default3]:[rank11]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 122, in _bench
+ [default3]:[rank11]: return do_bench(kernel_call, warmup=self.warmup, rep=self.rep, quantiles=(0.5, 0.2, 0.8))
+ [default3]:[rank11]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/testing.py", line 102, in do_bench
+ [default3]:[rank11]: fn()
+ [default3]:[rank11]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 110, in kernel_call
+ [default3]:[rank11]: self.fn.run(
+ [default3]:[rank11]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
+ [default3]:[rank11]: return self.fn.run(*args, **kwargs)
+ [default3]:[rank11]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
+ [default3]:[rank11]: return self.fn.run(*args, **kwargs)
+ [default3]:[rank11]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
+ [default3]:[rank11]: return self.fn.run(*args, **kwargs)
+ [default3]:[rank11]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 416, in run
+ [default3]:[rank11]: self.cache[device][key] = compile(
+ [default3]:[rank11]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/compiler/compiler.py", line 194, in compile
+ [default3]:[rank11]: metadata_group[f"{src.name}.{ext}"] = fn_cache_manager.put(next_module, f"{src.name}.{ext}")
+ [default3]:[rank11]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/cache.py", line 123, in put
+ [default3]:[rank11]: with open(temp_path, mode) as f:
+ [default3]:[rank11]: OSError: [Errno 122] Disk quota exceeded
+ [default1]:[rank9]: OSError: [Errno 122] Disk quota exceeded
+ [default1]:
+ [default1]:[rank9]: During handling of the above exception, another exception occurred:
+ [default1]:
+ [default1]:[rank9]: Traceback (most recent call last):
466
+ [default1]:[rank9]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
467
+ [default1]:[rank9]: trainer.train(dataloader)
468
+ [default1]:[rank9]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
469
+ [default1]:[rank9]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
470
+ [default1]:[rank9]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
471
+ [default1]:[rank9]: outputs = self.pipeline_engine.train_batch_iter(
472
+ [default1]:[rank9]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
473
+ [default1]:[rank9]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
474
+ [default1]:[rank9]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
475
+ [default1]:[rank9]: output = model(**micro_batch)
476
+ [default1]:[rank9]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
477
+ [default1]:[rank9]: return self._call_impl(*args, **kwargs)
478
+ [default1]:[rank9]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
479
+ [default1]:[rank9]: return forward_call(*args, **kwargs)
480
+ [default1]:[rank9]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
481
+ [default1]:[rank9]: sharded_logits = self.model(
482
+ [default1]:[rank9]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
483
+ [default1]:[rank9]: return self._call_impl(*args, **kwargs)
484
+ [default1]:[rank9]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
485
+ [default1]:[rank9]: return forward_call(*args, **kwargs)
486
+ [default1]:[rank9]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
487
+ [default1]:[rank9]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
488
+ [default1]:[rank9]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
489
+ [default1]:[rank9]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
490
+ [default1]:[rank9]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
491
+ [default1]:[rank9]: return self._call_impl(*args, **kwargs)
492
+ [default1]:[rank9]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
493
+ [default1]:[rank9]: return forward_call(*args, **kwargs)
494
+ [default1]:[rank9]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
495
+ [default1]:[rank9]: output = self.pp_block(**new_kwargs)
496
+ [default1]:[rank9]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
497
+ [default1]:[rank9]: return self._call_impl(*args, **kwargs)
498
+ [default1]:[rank9]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
499
+ [default1]:[rank9]: return forward_call(*args, **kwargs)
500
+ [default1]:[rank9]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 629, in forward
501
+ [default1]:[rank9]: hidden_states = self.input_layernorm(hidden_states)
502
+ [default1]:[rank9]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
503
+ [default1]:[rank9]: return self._call_impl(*args, **kwargs)
504
+ [default1]:[rank9]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
505
+ [default1]:[rank9]: return forward_call(*args, **kwargs)
506
+ [default1]:[rank9]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/nn/layer_norm.py", line 42, in forward
507
+ [default1]:[rank9]: return layer_norm_fn(
508
+ [default1]:[rank9]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 875, in layer_norm_fn
509
+ [default1]:[rank9]: return LayerNormFn.apply(
510
+ [default1]:[rank9]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/function.py", line 598, in apply
511
+ [default1]:[rank9]: return super().apply(*args, **kwargs) # type: ignore[misc]
512
+ [default1]:[rank9]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 748, in forward
513
+ [default1]:[rank9]: y, y1, mean, rstd, residual_out, seeds, dropout_mask, dropout_mask1 = _layer_norm_fwd(
514
+ [default1]:[rank9]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 335, in _layer_norm_fwd
515
+ [default1]:[rank9]: _layer_norm_fwd_1pass_kernel[(M,)](
516
+ [default1]:[rank9]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 167, in <lambda>
517
+ [default1]:[rank9]: return lambda *args, **kwargs: self.run(grid=grid, warmup=False, *args, **kwargs)
518
+ [default1]:[rank9]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in run
519
+ [default1]:[rank9]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
520
+ [default1]:[rank9]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in <dictcomp>
521
+ [default1]:[rank9]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
522
+ [default1]:[rank9]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 122, in _bench
523
+ [default1]:[rank9]: return do_bench(kernel_call, warmup=self.warmup, rep=self.rep, quantiles=(0.5, 0.2, 0.8))
524
+ [default1]:[rank9]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/testing.py", line 102, in do_bench
525
+ [default1]:[rank9]: fn()
526
+ [default1]:[rank9]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 110, in kernel_call
527
+ [default1]:[rank9]: self.fn.run(
528
+ [default1]:[rank9]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
529
+ [default1]:[rank9]: return self.fn.run(*args, **kwargs)
530
+ [default1]:[rank9]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
531
+ [default1]:[rank9]: return self.fn.run(*args, **kwargs)
532
+ [default1]:[rank9]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
533
+ [default1]:[rank9]: return self.fn.run(*args, **kwargs)
534
+ [default1]:[rank9]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 416, in run
535
+ [default1]:[rank9]: self.cache[device][key] = compile(
536
+ [default1]:[rank9]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/compiler/compiler.py", line 194, in compile
537
+ [default1]:[rank9]: metadata_group[f"{src.name}.{ext}"] = fn_cache_manager.put(next_module, f"{src.name}.{ext}")
538
+ [default1]:[rank9]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/cache.py", line 123, in put
539
+ [default1]:[rank9]: with open(temp_path, mode) as f:
540
+ [default1]:[rank9]: OSError: [Errno 122] Disk quota exceeded
541
+ [default0]:[rank0]: OSError: [Errno 122] Disk quota exceeded
542
+ [default0]:
543
+ [default0]:[rank0]: During handling of the above exception, another exception occurred:
544
+ [default0]:
545
+ [default0]:[rank0]: Traceback (most recent call last):
546
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
547
+ [default0]:[rank0]: trainer.train(dataloader)
548
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
549
+ [default0]:[rank0]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
550
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
551
+ [default0]:[rank0]: outputs = self.pipeline_engine.train_batch_iter(
552
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 278, in train_batch_iter
553
+ [default0]:[rank0]: output = self.forward(context=context, state=state, micro_batch=micro_batch, model=model)
554
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 44, in forward
555
+ [default0]:[rank0]: output = model(**micro_batch)
556
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
557
+ [default0]:[rank0]: return self._call_impl(*args, **kwargs)
558
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
559
+ [default0]:[rank0]: return forward_call(*args, **kwargs)
560
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 891, in forward
561
+ [default0]:[rank0]: sharded_logits = self.model(
562
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
563
+ [default0]:[rank0]: return self._call_impl(*args, **kwargs)
564
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
565
+ [default0]:[rank0]: return forward_call(*args, **kwargs)
566
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 764, in forward
567
+ [default0]:[rank0]: return self.forward_with_hidden_states(input_ids=input_ids, input_mask=input_mask)[0]
568
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 780, in forward_with_hidden_states
569
+ [default0]:[rank0]: hidden_encoder_states = encoder_block(**hidden_encoder_states)
570
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
571
+ [default0]:[rank0]: return self._call_impl(*args, **kwargs)
572
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
573
+ [default0]:[rank0]: return forward_call(*args, **kwargs)
574
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/block.py", line 151, in forward
575
+ [default0]:[rank0]: output = self.pp_block(**new_kwargs)
576
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
577
+ [default0]:[rank0]: return self._call_impl(*args, **kwargs)
578
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
579
+ [default0]:[rank0]: return forward_call(*args, **kwargs)
580
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 631, in forward
581
+ [default0]:[rank0]: output = self.attn(hidden_states=hidden_states, sequence_mask=sequence_mask)
582
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
583
+ [default0]:[rank0]: return self._call_impl(*args, **kwargs)
584
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
585
+ [default0]:[rank0]: return forward_call(*args, **kwargs)
586
+ [default0]:[rank0]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/models/llama.py", line 566, in forward
587
+ [default0]:[rank0]: query_states, key_value_states = self.flash_rotary_embedding(query_states, kv=key_value_states)
588
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
589
+ [default0]:[rank0]: return self._call_impl(*args, **kwargs)
590
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
591
+ [default0]:[rank0]: return forward_call(*args, **kwargs)
592
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/layers/rotary.py", line 457, in forward
593
+ [default0]:[rank0]: q = apply_rotary_emb_func(
594
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/layers/rotary.py", line 122, in apply_rotary_emb
595
+ [default0]:[rank0]: return ApplyRotaryEmb.apply(
596
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/function.py", line 598, in apply
597
+ [default0]:[rank0]: return super().apply(*args, **kwargs) # type: ignore[misc]
598
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/layers/rotary.py", line 48, in forward
599
+ [default0]:[rank0]: out = apply_rotary(
600
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/rotary.py", line 202, in apply_rotary
601
+ [default0]:[rank0]: rotary_kernel[grid](
602
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 167, in <lambda>
603
+ [default0]:[rank0]: return lambda *args, **kwargs: self.run(grid=grid, warmup=False, *args, **kwargs)
604
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 416, in run
605
+ [default0]:[rank0]: self.cache[device][key] = compile(
606
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/compiler/compiler.py", line 194, in compile
607
+ [default0]:[rank0]: metadata_group[f"{src.name}.{ext}"] = fn_cache_manager.put(next_module, f"{src.name}.{ext}")
608
+ [default0]:[rank0]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/cache.py", line 123, in put
609
+ [default0]:[rank0]: with open(temp_path, mode) as f:
610
+ [default0]:[rank0]: OSError: [Errno 122] Disk quota exceeded
611
+ [default5]:[rank5]: OSError: [Errno 122] Disk quota exceeded
612
+ [default5]:
613
+ [default5]:[rank5]: During handling of the above exception, another exception occurred:
614
+ [default5]:
615
+ [default5]:[rank5]: Traceback (most recent call last):
616
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
617
+ [default5]:[rank5]: trainer.train(dataloader)
618
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
619
+ [default5]:[rank5]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
620
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
621
+ [default5]:[rank5]: outputs = self.pipeline_engine.train_batch_iter(
622
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 295, in train_batch_iter
623
+ [default5]:[rank5]: self.backward(context=context, state=state, grad_accumulator=grad_accumulator)
624
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 86, in backward
625
+ [default5]:[rank5]: grad_accumulator.backward(sum(activations))
626
+ [default5]:[rank5]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/optim/gradient_accumulator.py", line 205, in backward
627
+ [default5]:[rank5]: result = loss.backward()
628
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/_tensor.py", line 525, in backward
629
+ [default5]:[rank5]: torch.autograd.backward(
630
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/__init__.py", line 267, in backward
631
+ [default5]:[rank5]: _engine_run_backward(
632
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/graph.py", line 744, in _engine_run_backward
633
+ [default5]:[rank5]: return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
634
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/function.py", line 301, in apply
635
+ [default5]:[rank5]: return user_fn(self, *args)
636
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 821, in backward
637
+ [default5]:[rank5]: dx, dw, db, dresidual_in, dx1, dw1, db1 = _layer_norm_bwd(
638
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 643, in _layer_norm_bwd
639
+ [default5]:[rank5]: _layer_norm_bwd_kernel[grid](
640
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 167, in <lambda>
641
+ [default5]:[rank5]: return lambda *args, **kwargs: self.run(grid=grid, warmup=False, *args, **kwargs)
642
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in run
643
+ [default5]:[rank5]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
644
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in <dictcomp>
645
+ [default5]:[rank5]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
646
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 122, in _bench
647
+ [default5]:[rank5]: return do_bench(kernel_call, warmup=self.warmup, rep=self.rep, quantiles=(0.5, 0.2, 0.8))
648
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/testing.py", line 102, in do_bench
649
+ [default5]:[rank5]: fn()
650
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 110, in kernel_call
651
+ [default5]:[rank5]: self.fn.run(
652
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
653
+ [default5]:[rank5]: return self.fn.run(*args, **kwargs)
654
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
655
+ [default5]:[rank5]: return self.fn.run(*args, **kwargs)
656
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
657
+ [default5]:[rank5]: return self.fn.run(*args, **kwargs)
658
+ [default5]:[rank5]: [Previous line repeated 2 more times]
659
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 416, in run
660
+ [default5]:[rank5]: self.cache[device][key] = compile(
661
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/compiler/compiler.py", line 194, in compile
662
+ [default5]:[rank5]: metadata_group[f"{src.name}.{ext}"] = fn_cache_manager.put(next_module, f"{src.name}.{ext}")
663
+ [default5]:[rank5]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/cache.py", line 123, in put
664
+ [default5]:[rank5]: with open(temp_path, mode) as f:
665
+ [default5]:[rank5]: OSError: [Errno 122] Disk quota exceeded
666
+ [default6]:[rank6]: OSError: [Errno 122] Disk quota exceeded
667
+ [default6]:
668
+ [default6]:[rank6]: During handling of the above exception, another exception occurred:
669
+ [default6]:
670
+ [default6]:[rank6]: Traceback (most recent call last):
671
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py", line 237, in <module>
672
+ [default6]:[rank6]: trainer.train(dataloader)
673
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 429, in train
674
+ [default6]:[rank6]: outputs, loss_avg = self.training_step(dataloader=self.current_dataloader)
675
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/trainer.py", line 462, in training_step
676
+ [default6]:[rank6]: outputs = self.pipeline_engine.train_batch_iter(
677
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 295, in train_batch_iter
678
+ [default6]:[rank6]: self.backward(context=context, state=state, grad_accumulator=grad_accumulator)
679
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/parallel/pipeline_parallel/engine.py", line 86, in backward
680
+ [default6]:[rank6]: grad_accumulator.backward(sum(activations))
681
+ [default6]:[rank6]: File "/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/src/nanotron/optim/gradient_accumulator.py", line 205, in backward
682
+ [default6]:[rank6]: result = loss.backward()
683
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/_tensor.py", line 525, in backward
684
+ [default6]:[rank6]: torch.autograd.backward(
685
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/__init__.py", line 267, in backward
686
+ [default6]:[rank6]: _engine_run_backward(
687
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/graph.py", line 744, in _engine_run_backward
688
+ [default6]:[rank6]: return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
689
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/function.py", line 301, in apply
690
+ [default6]:[rank6]: return user_fn(self, *args)
691
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 821, in backward
692
+ [default6]:[rank6]: dx, dw, db, dresidual_in, dx1, dw1, db1 = _layer_norm_bwd(
693
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/flash_attn/ops/triton/layer_norm.py", line 643, in _layer_norm_bwd
694
+ [default6]:[rank6]: _layer_norm_bwd_kernel[grid](
695
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 167, in <lambda>
696
+ [default6]:[rank6]: return lambda *args, **kwargs: self.run(grid=grid, warmup=False, *args, **kwargs)
697
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in run
698
+ [default6]:[rank6]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
699
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 143, in <dictcomp>
700
+ [default6]:[rank6]: timings = {config: self._bench(*args, config=config, **kwargs) for config in pruned_configs}
701
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 122, in _bench
702
+ [default6]:[rank6]: return do_bench(kernel_call, warmup=self.warmup, rep=self.rep, quantiles=(0.5, 0.2, 0.8))
703
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/testing.py", line 102, in do_bench
704
+ [default6]:[rank6]: fn()
705
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 110, in kernel_call
706
+ [default6]:[rank6]: self.fn.run(
707
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
708
+ [default6]:[rank6]: return self.fn.run(*args, **kwargs)
709
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
710
+ [default6]:[rank6]: return self.fn.run(*args, **kwargs)
711
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/autotuner.py", line 305, in run
712
+ [default6]:[rank6]: return self.fn.run(*args, **kwargs)
713
+ [default6]:[rank6]: [Previous line repeated 2 more times]
714
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/jit.py", line 416, in run
715
+ [default6]:[rank6]: self.cache[device][key] = compile(
716
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/compiler/compiler.py", line 194, in compile
717
+ [default6]:[rank6]: metadata_group[f"{src.name}.{ext}"] = fn_cache_manager.put(next_module, f"{src.name}.{ext}")
718
+ [default6]:[rank6]: File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/triton/runtime/cache.py", line 123, in put
719
+ [default6]:[rank6]: with open(temp_path, mode) as f:
720
+ [default6]:[rank6]: OSError: [Errno 122] Disk quota exceeded
721
+ W0702 16:23:51.686000 140654571476800 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 1286319 closing signal SIGTERM
722
+ W0702 16:23:51.690000 140654571476800 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 1286320 closing signal SIGTERM
723
+ W0702 16:23:51.692000 140166684469056 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 3192020 closing signal SIGTERM
724
+ W0702 16:23:51.695000 140654571476800 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 1286321 closing signal SIGTERM
725
+ W0702 16:23:51.695000 140166684469056 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 3192022 closing signal SIGTERM
726
+ W0702 16:23:51.699000 140654571476800 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 1286322 closing signal SIGTERM
727
+ W0702 16:23:51.695000 140166684469056 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 3192024 closing signal SIGTERM
728
+ W0702 16:23:51.706000 140166684469056 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 3192025 closing signal SIGTERM
729
+ W0702 16:23:51.731000 140654571476800 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 1286325 closing signal SIGTERM
730
+ E0702 16:23:53.311000 140166684469056 torch/distributed/elastic/multiprocessing/api.py:826] failed (exitcode: 1) local_rank: 0 (pid: 3192018) of binary: /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10
731
+ Traceback (most recent call last):
732
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/torchrun", line 8, in <module>
733
+ sys.exit(main())
734
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 347, in wrapper
735
+ return f(*args, **kwargs)
736
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/run.py", line 879, in main
737
+ run(args)
738
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/run.py", line 870, in run
739
+ elastic_launch(
740
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 132, in __call__
741
+ return launch_agent(self._config, self._entrypoint, list(args))
742
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 263, in launch_agent
743
+ raise ChildFailedError(
744
+ torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
745
+ ============================================================
746
+ /fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py FAILED
747
+ ------------------------------------------------------------
748
+ Failures:
749
+ [1]:
750
+ time : 2024-07-02_16:23:51
751
+ host : ip-26-0-171-56.ec2.internal
752
+ rank : 9 (local_rank: 1)
753
+ exitcode : 1 (pid: 3192019)
754
+ error_file: <N/A>
755
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
756
+ [2]:
757
+ time : 2024-07-02_16:23:51
758
+ host : ip-26-0-171-56.ec2.internal
759
+ rank : 11 (local_rank: 3)
760
+ exitcode : 1 (pid: 3192021)
761
+ error_file: <N/A>
762
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
763
+ [3]:
764
+ time : 2024-07-02_16:23:51
765
+ host : ip-26-0-171-56.ec2.internal
766
+ rank : 13 (local_rank: 5)
767
+ exitcode : 1 (pid: 3192023)
768
+ error_file: <N/A>
769
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
770
+ ------------------------------------------------------------
771
+ Root Cause (first observed failure):
772
+ [0]:
773
+ time : 2024-07-02_16:23:51
774
+ host : ip-26-0-171-56.ec2.internal
775
+ rank : 8 (local_rank: 0)
776
+ exitcode : 1 (pid: 3192018)
777
+ error_file: <N/A>
778
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
779
+ ============================================================
780
+ srun: error: ip-26-0-171-56: task 1: Exited with exit code 1
781
+ E0702 16:23:53.738000 140654571476800 torch/distributed/elastic/multiprocessing/api.py:826] failed (exitcode: 1) local_rank: 0 (pid: 1286318) of binary: /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10
782
+ Traceback (most recent call last):
783
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/torchrun", line 8, in <module>
784
+ sys.exit(main())
785
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 347, in wrapper
786
+ return f(*args, **kwargs)
787
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/run.py", line 879, in main
788
+ run(args)
789
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/run.py", line 870, in run
790
+ elastic_launch(
791
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 132, in __call__
792
+ return launch_agent(self._config, self._entrypoint, list(args))
793
+ File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 263, in launch_agent
794
+ raise ChildFailedError(
795
+ torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
796
+ ============================================================
797
+ /fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py FAILED
798
+ ------------------------------------------------------------
799
+ Failures:
800
+ [1]:
801
+ time : 2024-07-02_16:23:51
802
+ host : ip-26-0-160-225.ec2.internal
803
+ rank : 5 (local_rank: 5)
804
+ exitcode : 1 (pid: 1286323)
805
+ error_file: <N/A>
806
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
807
+ [2]:
808
+ time : 2024-07-02_16:23:51
809
+ host : ip-26-0-160-225.ec2.internal
810
+ rank : 6 (local_rank: 6)
811
+ exitcode : 1 (pid: 1286324)
812
+ error_file: <N/A>
813
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
814
+ ------------------------------------------------------------
815
+ Root Cause (first observed failure):
816
+ [0]:
817
+ time : 2024-07-02_16:23:51
818
+ host : ip-26-0-160-225.ec2.internal
819
+ rank : 0 (local_rank: 0)
820
+ exitcode : 1 (pid: 1286318)
821
+ error_file: <N/A>
822
+ traceback : To enable traceback see: https://pytorch.org/docs/stable/elastic/errors.html
823
+ ============================================================
824
+ srun: error: ip-26-0-160-225: task 0: Exited with exit code 1
825
+ Consider using `hf_transfer` for faster uploads. This solution comes with some limitations. See https://huggingface.co/docs/huggingface_hub/hf_transfer for more details.
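Note on the failure above: every per-rank traceback bottoms out in triton/runtime/cache.py failing to write Triton's JIT compilation cache ("OSError: [Errno 122] Disk quota exceeded"), so the job died while compiling the flash-attn Triton kernels, not from GPU memory pressure. A minimal sketch of a pre-run workaround, assuming a node-local scratch directory is available and that Triton's cache location can be redirected with the TRITON_CACHE_DIR environment variable (the scratch path below is hypothetical, not taken from this run):

    # Check whether the home / FSx filesystem is actually over quota before resubmitting.
    df -h ~ /fsx
    # Move Triton's JIT cache (default: ~/.triton) off the quota-limited filesystem.
    export TRITON_CACHE_DIR=/scratch/$USER/triton_cache   # assumption: node-local scratch is writable
    mkdir -p "$TRITON_CACHE_DIR"

Exporting this before the torchrun launch (or clearing space under the default ~/.triton cache) should let the kernels compile; the per-rank tracebacks themselves are left verbatim above.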
llama-1B/16_GPUS/dp-4_tp-4_pp-1_mbz-4/status.txt ADDED
@@ -0,0 +1 @@
1
+ fail