Upload llama-1B/8_GPUS/dp-1_tp-8_pp-1_mbz-4
llama-1B/8_GPUS/dp-1_tp-8_pp-1_mbz-4/bench.slurm
ADDED
@@ -0,0 +1,111 @@
+#!/bin/bash
+
+#SBATCH --job-name=bench_cluster
+#SBATCH --time=02:00:00
+#SBATCH --partition=hopper-prod
+#SBATCH --nodes=1
+#SBATCH --gres=gpu:8
+#SBATCH --qos=normal
+#SBATCH --ntasks-per-node=1
+#SBATCH --cpus-per-task=96
+#SBATCH --exclusive
+#SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-8_pp-1_mbz-4/log.out
+#SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-8_pp-1_mbz-4/log.out
+
+# Function to update status based on squeue output
+update_status() {
+    job_id=$1
+    status_file=$2
+    # For unknown reasons, it doesn't update status for pending. It only works for running.
+    while true; do
+        job_status=$(squeue --job $job_id --noheader --format=%T)
+        echo "Job status: $job_status"
+        if [ -z "$job_status" ]; then
+            # Job has finished or is not found
+            break
+        elif [ "$job_status" = "RUNNING" ]; then
+            printf "running" > $status_file
+            break
+        fi
+        sleep 10
+    done
+}
+
+# Misc initializations.
+echo "========================"
+echo "START TIME: $(date)"
+source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
+conda activate /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster
+echo python3 version = $(python3 --version)
+echo "========================"
+
+# Slurm stuff
+export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
+export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
+export MASTER_PORT=$((1024 + RANDOM % 64511))
+
+export TMPDIR=/scratch
+export HF_DATASETS_CACHE="/admin/home/ferdinand_mom/.cache"
+export CUBLAS_WORKSPACE_CONFIG=":4096:8"
+export CUDA_DEVICE_MAX_CONNECTIONS="1"
+
+huggingface-cli login --token $HUGGINGFACE_TOKEN
+
+
+NANOTRON_REPO="/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron"
+CMD="$NANOTRON_REPO/run_train.py --config-file /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-8_pp-1_mbz-4/config.yaml"
+
+LAUNCHER="torchrun \
+    --nproc_per_node 8 \
+    --nnodes 1 \
+    --rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} \
+    --rdzv_backend c10d \
+    --max_restarts 0 \
+    --tee 3 \
+    --node_rank ${SLURM_PROCID}"
+
+# Checkout the bench_cluster branch
+cd $NANOTRON_REPO
+git checkout bench_cluster
+cd ..
+# Get the current job ID
+job_id=${SLURM_JOB_ID}
+
+# Update status to "pending" or "running" in the background
+update_status $job_id /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-8_pp-1_mbz-4/status.txt &
+
+# Run the main command
+srun -u $LAUNCHER $CMD
+exit_status=$?
+
+# Update status based on the exit status of `srun`
+if [ $exit_status -eq 0 ]; then
+    printf "completed" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-8_pp-1_mbz-4/status.txt
+else
+    if grep -q "OutOfMemoryError" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-8_pp-1_mbz-4/log.out; then
+        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-8_pp-1_mbz-4/status.txt
+    elif grep -q " CUDA error: an illegal memory access" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-8_pp-1_mbz-4/log.out; then
+        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-8_pp-1_mbz-4/status.txt
+    elif grep -q "Timeout at NCCL" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-8_pp-1_mbz-4/log.out; then
+        printf "timeout" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-8_pp-1_mbz-4/status.txt
+    else
+        printf "fail" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-8_pp-1_mbz-4/status.txt
+    fi
+fi
+
+# Run the report script if the job completed successfully
+if [ $exit_status -eq 0 ]; then
+    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-8_pp-1_mbz-4 --is_logs
+    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-8_pp-1_mbz-4 --is_profiler
+fi
+
+
+# Push the folder to the Hub using huggingface-cli
+huggingface-cli upload nanotron/bench_cluster /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-8_pp-1_mbz-4 llama-1B/8_GPUS/dp-1_tp-8_pp-1_mbz-4 --commit-message "Upload llama-1B/8_GPUS/dp-1_tp-8_pp-1_mbz-4"
+
+# Verify the upload
+if [ $? -eq 0 ]; then
+    echo "Uploading to Huggingface Hub successful"
+else
+    echo "Failed to upload to Huggingface Hub"
+fi
llama-1B/8_GPUS/dp-1_tp-8_pp-1_mbz-4/config.yaml
ADDED
@@ -0,0 +1,90 @@
+general:
+  project: bench_cluster
+  seed: 42
+model:
+  ddp_bucket_cap_mb: 25
+  dtype: bfloat16
+  init_method:
+    std: 0.025
+  make_vocab_size_divisible_by: 1
+  model_config:
+    bos_token_id: 1
+    eos_token_id: 2
+    hidden_act: silu
+    hidden_size: 2048
+    initializer_range: 0.02
+    intermediate_size: 4096
+    is_llama_config: true
+    max_position_embeddings: 4096
+    num_attention_heads: 32
+    num_hidden_layers: 24
+    num_key_value_heads: 32
+    pad_token_id: null
+    pretraining_tp: 1
+    rms_norm_eps: 1.0e-05
+    rope_scaling: null
+    rope_theta: 10000.0
+    tie_word_embeddings: true
+    use_cache: true
+    vocab_size: 50257
+optimizer:
+  accumulate_grad_in_fp32: true
+  clip_grad: 1.0
+  learning_rate_scheduler:
+    learning_rate: 0.0001
+    lr_decay_style: linear
+    lr_warmup_style: linear
+    lr_warmup_steps: 1
+    min_decay_lr: 1.0e-05
+  optimizer_factory:
+    adam_beta1: 0.9
+    adam_beta2: 0.95
+    adam_eps: 1.0e-08
+    name: adamW
+    torch_adam_is_fused: true
+  weight_decay: 0.01
+  zero_stage: 1
+parallelism:
+  dp: 1
+  expert_parallel_size: 1
+  pp: 1
+  pp_engine: 1f1b
+  tp: 8
+  tp_linear_async_communication: false
+  tp_mode: REDUCE_SCATTER
+profiler:
+  profiler_export_path: /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-8_pp-1_mbz-4
+tokenizer:
+  tokenizer_max_length: null
+  tokenizer_name_or_path: openai-community/gpt2
+  tokenizer_revision: null
+data_stages:
+- name: Training Stage
+  start_training_step: 1
+  data:
+    dataset:
+      dataset_overwrite_cache: false
+      dataset_processing_num_proc_per_process: 64
+      hf_dataset_config_name: null
+      hf_dataset_or_datasets: roneneldan/TinyStories
+      hf_dataset_splits: train
+      text_column_name: text
+    num_loading_workers: 0
+    seed: 42
+lighteval: null
+tokens:
+  train_steps: 20
+  val_check_interval: -1
+  batch_accumulation_per_replica: 256
+  limit_test_batches: 0
+  limit_val_batches: 0
+  micro_batch_size: 4
+  sequence_length: 4096
+logging:
+  iteration_step_info_interval: 1
+  log_level: info
+  log_level_replica: info
+checkpoints:
+  checkpoint_interval: 100000
+  checkpoints_path: /dev/null
+  resume_checkpoint_path: null
llama-1B/8_GPUS/dp-1_tp-8_pp-1_mbz-4/log.out
ADDED
@@ -0,0 +1,529 @@
1 |
+
========================
|
2 |
+
START TIME: Thu Jul 4 00:00:38 UTC 2024
|
3 |
+
python3 version = Python 3.10.14
|
4 |
+
========================
|
5 |
+
The token has not been saved to the git credentials helper. Pass `add_to_git_credential=True` in this function directly or `--add-to-git-credential` if using via `huggingface-cli` if you want to set the git credential as well.
|
6 |
+
Token is valid (permission: write).
|
7 |
+
Your token has been saved to /admin/home/ferdinand_mom/.cache/huggingface/token
|
8 |
+
Login successful
|
9 |
+
Already on 'bench_cluster'
|
10 |
+
M examples/config_tiny_llama.py
|
11 |
+
M examples/config_tiny_llama.yaml
|
12 |
+
M examples/train_tiny_llama.sh
|
13 |
+
M src/nanotron/models/llama.py
|
14 |
+
M src/nanotron/trainer.py
|
15 |
+
Your branch is up to date with 'origin/bench_cluster'.
|
16 |
+
Job status: RUNNING
|
17 |
+
W0704 00:00:46.144000 139891675088704 torch/distributed/run.py:757]
|
18 |
+
W0704 00:00:46.144000 139891675088704 torch/distributed/run.py:757] *****************************************
|
19 |
+
W0704 00:00:46.144000 139891675088704 torch/distributed/run.py:757] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
|
20 |
+
W0704 00:00:46.144000 139891675088704 torch/distributed/run.py:757] *****************************************
|
21 |
+
[default0]:07/04/2024 00:01:07 [WARNING|DP=0|PP=0|TP=0|ip-26-0-160-225]: [Vocab Size Padding] Padded vocab (size: 50257) with 7 dummy tokens (new size: 50264)
|
22 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Config:
|
23 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Config(general=GeneralArgs(project='bench_cluster',
|
24 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: run='%date_%jobid',
|
25 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: seed=42,
|
26 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: step=None,
|
27 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: consumed_train_samples=None,
|
28 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: benchmark_csv_path=None,
|
29 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: ignore_sanity_checks=True),
|
30 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: parallelism=ParallelismArgs(dp=1,
|
31 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: pp=1,
|
32 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: tp=8,
|
33 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: pp_engine=<nanotron.parallel.pipeline_parallel.engine.OneForwardOneBackwardPipelineEngine object at 0x7f72f652c880>,
|
34 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: tp_mode=<TensorParallelLinearMode.REDUCE_SCATTER: 2>,
|
35 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: tp_linear_async_communication=False,
|
36 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: expert_parallel_size=1),
|
37 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: model=ModelArgs(model_config=LlamaConfig(bos_token_id=1,
|
38 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: eos_token_id=2,
|
39 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: hidden_act='silu',
|
40 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: hidden_size=2048,
|
41 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: initializer_range=0.02,
|
42 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: intermediate_size=4096,
|
43 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: is_llama_config=True,
|
44 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: max_position_embeddings=4096,
|
45 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: num_attention_heads=32,
|
46 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: num_hidden_layers=24,
|
47 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: num_key_value_heads=32,
|
48 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: pad_token_id=None,
|
49 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: pretraining_tp=1,
|
50 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: rms_norm_eps=1e-05,
|
51 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: rope_scaling=None,
|
52 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: rope_theta=10000.0,
|
53 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: tie_word_embeddings=True,
|
54 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: use_cache=True,
|
55 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: vocab_size=50264),
|
56 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: init_method=RandomInit(std=0.025),
|
57 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: dtype=torch.bfloat16,
|
58 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: make_vocab_size_divisible_by=1,
|
59 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: ddp_bucket_cap_mb=25),
|
60 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: tokenizer=TokenizerArgs(tokenizer_name_or_path='openai-community/gpt2',
|
61 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: tokenizer_revision=None,
|
62 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: tokenizer_max_length=None),
|
63 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: checkpoints=CheckpointsArgs(checkpoints_path=Path('/dev/null'),
|
64 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: checkpoint_interval=100000,
|
65 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: save_initial_state=False,
|
66 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: resume_checkpoint_path=None,
|
67 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: checkpoints_path_is_shared_file_system=False),
|
68 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: logging=LoggingArgs(log_level='info',
|
69 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: log_level_replica='info',
|
70 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: iteration_step_info_interval=1),
|
71 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: tokens=TokensArgs(sequence_length=4096,
|
72 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: train_steps=20,
|
73 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: micro_batch_size=4,
|
74 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: batch_accumulation_per_replica=256,
|
75 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: val_check_interval=-1,
|
76 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: limit_val_batches=0,
|
77 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: limit_test_batches=0),
|
78 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: optimizer=OptimizerArgs(optimizer_factory=AdamWOptimizerArgs(adam_eps=1e-08,
|
79 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: adam_beta1=0.9,
|
80 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: adam_beta2=0.95,
|
81 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: torch_adam_is_fused=True,
|
82 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: name='adamW'),
|
83 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: zero_stage=1,
|
84 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: weight_decay=0.01,
|
85 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: clip_grad=1.0,
|
86 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: accumulate_grad_in_fp32=True,
|
87 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: learning_rate_scheduler=LRSchedulerArgs(learning_rate=0.0001,
|
88 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: lr_warmup_steps=1,
|
89 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: lr_warmup_style='linear',
|
90 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: lr_decay_style='linear',
|
91 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: lr_decay_steps=19,
|
92 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: lr_decay_starting_step=None,
|
93 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: min_decay_lr=1e-05)),
|
94 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: data_stages=[DatasetStageArgs(name='Training Stage',
|
95 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: start_training_step=1,
|
96 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: data=DataArgs(dataset=PretrainDatasetsArgs(hf_dataset_or_datasets='roneneldan/TinyStories',
|
97 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: hf_dataset_splits='train',
|
98 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: hf_dataset_config_name=None,
|
99 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: dataset_processing_num_proc_per_process=64,
|
100 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: dataset_overwrite_cache=False,
|
101 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: text_column_name='text'),
|
102 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: seed=42,
|
103 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: num_loading_workers=0))],
|
104 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: profiler=ProfilerArgs(profiler_export_path=Path('/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-1_tp-8_pp-1_mbz-4')),
|
105 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: lighteval=None)
|
106 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Model Config:
|
107 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: LlamaConfig(bos_token_id=1,
|
108 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: eos_token_id=2,
|
109 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: hidden_act='silu',
|
110 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: hidden_size=2048,
|
111 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: initializer_range=0.02,
|
112 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: intermediate_size=4096,
|
113 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: is_llama_config=True,
|
114 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: max_position_embeddings=4096,
|
115 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: num_attention_heads=32,
|
116 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: num_hidden_layers=24,
|
117 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: num_key_value_heads=32,
|
118 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: pad_token_id=None,
|
119 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: pretraining_tp=1,
|
120 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: rms_norm_eps=1e-05,
|
121 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: rope_scaling=None,
|
122 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: rope_theta=10000.0,
|
123 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: tie_word_embeddings=True,
|
124 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: use_cache=True,
|
125 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: vocab_size=50264)
|
126 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Building model..
|
127 |
+
[default0]:07/04/2024 00:01:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Setting PP block ranks...
|
128 |
+
[default7]:07/04/2024 00:01:23 [INFO|DP=0|PP=0|TP=7|ip-26-0-160-225]: Local number of parameters: 139M (264.73MiB)
|
129 |
+
[default7]:07/04/2024 00:01:23 [INFO|DP=0|PP=0|TP=7|ip-26-0-160-225]: [After model building] Memory usage: 290.76MiB. Peak allocated: 317.33MiB Peak reserved: 324.00MiB
|
130 |
+
[default7]:07/04/2024 00:01:23 [INFO|DP=0|PP=0|TP=7|ip-26-0-160-225]: No checkpoint path provided.
|
131 |
+
[default3]:07/04/2024 00:01:23 [INFO|DP=0|PP=0|TP=3|ip-26-0-160-225]: Local number of parameters: 139M (264.73MiB)
|
132 |
+
[default3]:07/04/2024 00:01:23 [INFO|DP=0|PP=0|TP=3|ip-26-0-160-225]: [After model building] Memory usage: 290.76MiB. Peak allocated: 317.33MiB Peak reserved: 324.00MiB
|
133 |
+
[default5]:07/04/2024 00:01:23 [INFO|DP=0|PP=0|TP=5|ip-26-0-160-225]: Local number of parameters: 139M (264.73MiB)
|
134 |
+
[default5]:07/04/2024 00:01:23 [INFO|DP=0|PP=0|TP=5|ip-26-0-160-225]: [After model building] Memory usage: 290.76MiB. Peak allocated: 317.33MiB Peak reserved: 324.00MiB
|
135 |
+
[default5]:07/04/2024 00:01:23 [INFO|DP=0|PP=0|TP=5|ip-26-0-160-225]: No checkpoint path provided.
|
136 |
+
[default2]:07/04/2024 00:01:23 [INFO|DP=0|PP=0|TP=2|ip-26-0-160-225]: Local number of parameters: 139M (264.73MiB)
|
137 |
+
[default2]:07/04/2024 00:01:23 [INFO|DP=0|PP=0|TP=2|ip-26-0-160-225]: [After model building] Memory usage: 290.76MiB. Peak allocated: 317.33MiB Peak reserved: 324.00MiB
|
138 |
+
[default4]:07/04/2024 00:01:23 [INFO|DP=0|PP=0|TP=4|ip-26-0-160-225]: Local number of parameters: 139M (264.73MiB)
|
139 |
+
[default4]:07/04/2024 00:01:23 [INFO|DP=0|PP=0|TP=4|ip-26-0-160-225]: [After model building] Memory usage: 290.76MiB. Peak allocated: 317.33MiB Peak reserved: 324.00MiB
|
140 |
+
[default6]:07/04/2024 00:01:23 [INFO|DP=0|PP=0|TP=6|ip-26-0-160-225]: Local number of parameters: 139M (264.73MiB)
|
141 |
+
[default6]:07/04/2024 00:01:23 [INFO|DP=0|PP=0|TP=6|ip-26-0-160-225]: [After model building] Memory usage: 290.76MiB. Peak allocated: 317.33MiB Peak reserved: 324.00MiB
|
142 |
+
[default2]:07/04/2024 00:01:23 [INFO|DP=0|PP=0|TP=2|ip-26-0-160-225]: No checkpoint path provided.
|
143 |
+
[default6]:07/04/2024 00:01:23 [INFO|DP=0|PP=0|TP=6|ip-26-0-160-225]: No checkpoint path provided.
|
144 |
+
[default3]:07/04/2024 00:01:23 [INFO|DP=0|PP=0|TP=3|ip-26-0-160-225]: No checkpoint path provided.
|
145 |
+
[default0]:07/04/2024 00:01:23 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Total number of parameters: 1.11G (2117.88MiB)
|
146 |
+
[default0]:07/04/2024 00:01:23 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Local number of parameters: 139M (264.73MiB)
|
147 |
+
[default0]:07/04/2024 00:01:23 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [After model building] Memory usage: 290.76MiB. Peak allocated: 317.33MiB Peak reserved: 324.00MiB
|
148 |
+
[default0]:07/04/2024 00:01:23 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: No checkpoint path provided.
|
149 |
+
[default0]:07/04/2024 00:01:23 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Parametrizing model parameters using StandardParametrizator
|
150 |
+
[default0]:07/04/2024 00:01:23 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [Optimizer Building] Using LearningRateForSP as learning rate
|
151 |
+
[default4]:07/04/2024 00:01:23 [INFO|DP=0|PP=0|TP=4|ip-26-0-160-225]: No checkpoint path provided.
|
152 |
+
[default1]:07/04/2024 00:01:23 [INFO|DP=0|PP=0|TP=1|ip-26-0-160-225]: Local number of parameters: 139M (264.73MiB)
|
153 |
+
[default1]:07/04/2024 00:01:23 [INFO|DP=0|PP=0|TP=1|ip-26-0-160-225]: [After model building] Memory usage: 290.76MiB. Peak allocated: 317.33MiB Peak reserved: 324.00MiB
|
154 |
+
[default1]:07/04/2024 00:01:23 [INFO|DP=0|PP=0|TP=1|ip-26-0-160-225]: No checkpoint path provided.
|
155 |
+
[default0]:07/04/2024 00:01:23 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [ZeRO sharding] Size of optimizer params per rank:
|
156 |
+
[default0]:07/04/2024 00:01:23 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [ZeRO sharding] DP Rank 0 has 139M out of 139M (100.00%) params' optimizer states
|
157 |
+
[default0]:07/04/2024 00:01:29 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [Training Plan] Stage Training Stage has 19 remaining training steps and has consumed 0 samples
|
158 |
+
[default0]:07/04/2024 00:01:29 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Using `datasets` library
|
159 |
+
[default0]:07/04/2024 00:01:29 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Loading tokenizer from openai-community/gpt2 and transformers/hf_hub versions ('4.41.2', '0.23.4')
|
160 |
+
[default0]:Repo card metadata block was not found. Setting CardData to empty.
|
161 |
+
[default0]:07/04/2024 00:01:29 [WARNING|DP=0|PP=0|TP=0|ip-26-0-160-225]: Repo card metadata block was not found. Setting CardData to empty.
|
162 |
+
[default0]:07/04/2024 00:01:31 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [Training Plan] There are 1 training stages
|
163 |
+
[default0]:07/04/2024 00:01:31 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [Stage Training Stage] start from step 1
|
164 |
+
[default0]:07/04/2024 00:01:31 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]:
|
165 |
+
[default0]:07/04/2024 00:01:31 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: [Start training] datetime: 2024-07-04 00:01:31.597784 | mbs: 4 | grad_accum: 256 | global_batch_size: 1024 | sequence_length: 4096 | train_steps: 20 | start_iteration_step: 0 | consumed_train_samples: 0
|
166 |
+
[default0]:07/04/2024 00:01:31 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Resuming training from stage Training Stage, it has trained for 0 samples and has 19 remaining train steps
|
167 |
+
[default0]:07/04/2024 00:01:31 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Memory usage: 1350.75MiB. Peak allocated 1350.76MiB. Peak reserved: 1384.00MiB
|
168 |
+
[default7]:Repo card metadata block was not found. Setting CardData to empty.
|
169 |
+
[default7]:07/04/2024 00:01:31 [WARNING|DP=0|PP=0|TP=7|ip-26-0-160-225]: Repo card metadata block was not found. Setting CardData to empty.
|
170 |
+
[default2]:Repo card metadata block was not found. Setting CardData to empty.
|
171 |
+
[default3]:Repo card metadata block was not found. Setting CardData to empty.
|
172 |
+
[default4]:Repo card metadata block was not found. Setting CardData to empty.
|
173 |
+
[default4]:07/04/2024 00:01:31 [WARNING|DP=0|PP=0|TP=4|ip-26-0-160-225]: Repo card metadata block was not found. Setting CardData to empty.
|
174 |
+
[default6]:07/04/2024 00:01:31 [WARNING|DP=0|PP=0|TP=6|ip-26-0-160-225]: Repo card metadata block was not found. Setting CardData to empty.
|
175 |
+
[default1]:07/04/2024 00:01:31 [WARNING|DP=0|PP=0|TP=1|ip-26-0-160-225]: Repo card metadata block was not found. Setting CardData to empty.
|
176 |
+
[default5]:07/04/2024 00:01:31 [WARNING|DP=0|PP=0|TP=5|ip-26-0-160-225]: Repo card metadata block was not found. Setting CardData to empty.
|
177 |
+
[default2]:07/04/2024 00:01:31 [WARNING|DP=0|PP=0|TP=2|ip-26-0-160-225]: Repo card metadata block was not found. Setting CardData to empty.
|
178 |
+
[default3]:07/04/2024 00:01:31 [WARNING|DP=0|PP=0|TP=3|ip-26-0-160-225]: Repo card metadata block was not found. Setting CardData to empty.
|
179 |
+
[default1]:Repo card metadata block was not found. Setting CardData to empty.
|
180 |
+
[default5]:Repo card metadata block was not found. Setting CardData to empty.
|
181 |
+
[default6]:Repo card metadata block was not found. Setting CardData to empty.
|
182 |
+
[default5]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/graph.py:744: UserWarning: c10d::allreduce_: an autograd kernel was not registered to the Autograd key(s) but we are trying to backprop through it. This may lead to silently incorrect behavior. This behavior is deprecated and will be removed in a future version of PyTorch. If your operator is differentiable, please ensure you have registered an autograd kernel to the correct Autograd key (e.g. DispatchKey::Autograd, DispatchKey::CompositeImplicitAutograd). If your operator is not differentiable, or to squash this warning and use the previous behavior, please register torch::CppFunction::makeFallthrough() to DispatchKey::Autograd. (Triggered internally at ../torch/csrc/autograd/autograd_not_implemented_fallback.cpp:63.)
|
183 |
+
[default5]: return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
|
184 |
+
[default6]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/graph.py:744: UserWarning: c10d::allreduce_: an autograd kernel was not registered to the Autograd key(s) but we are trying to backprop through it. This may lead to silently incorrect behavior. This behavior is deprecated and will be removed in a future version of PyTorch. If your operator is differentiable, please ensure you have registered an autograd kernel to the correct Autograd key (e.g. DispatchKey::Autograd, DispatchKey::CompositeImplicitAutograd). If your operator is not differentiable, or to squash this warning and use the previous behavior, please register torch::CppFunction::makeFallthrough() to DispatchKey::Autograd. (Triggered internally at ../torch/csrc/autograd/autograd_not_implemented_fallback.cpp:63.)
|
185 |
+
[default6]: return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
|
186 |
+
[default7]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/graph.py:744: UserWarning: c10d::allreduce_: an autograd kernel was not registered to the Autograd key(s) but we are trying to backprop through it. This may lead to silently incorrect behavior. This behavior is deprecated and will be removed in a future version of PyTorch. If your operator is differentiable, please ensure you have registered an autograd kernel to the correct Autograd key (e.g. DispatchKey::Autograd, DispatchKey::CompositeImplicitAutograd). If your operator is not differentiable, or to squash this warning and use the previous behavior, please register torch::CppFunction::makeFallthrough() to DispatchKey::Autograd. (Triggered internally at ../torch/csrc/autograd/autograd_not_implemented_fallback.cpp:63.)
|
187 |
+
[default0]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/graph.py:744: UserWarning: c10d::allreduce_: an autograd kernel was not registered to the Autograd key(s) but we are trying to backprop through it. This may lead to silently incorrect behavior. This behavior is deprecated and will be removed in a future version of PyTorch. If your operator is differentiable, please ensure you have registered an autograd kernel to the correct Autograd key (e.g. DispatchKey::Autograd, DispatchKey::CompositeImplicitAutograd). If your operator is not differentiable, or to squash this warning and use the previous behavior, please register torch::CppFunction::makeFallthrough() to DispatchKey::Autograd. (Triggered internally at ../torch/csrc/autograd/autograd_not_implemented_fallback.cpp:63.)
|
188 |
+
[default0]: return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
|
189 |
+
[default7]: return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
|
190 |
+
[default2]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/graph.py:744: UserWarning: c10d::allreduce_: an autograd kernel was not registered to the Autograd key(s) but we are trying to backprop through it. This may lead to silently incorrect behavior. This behavior is deprecated and will be removed in a future version of PyTorch. If your operator is differentiable, please ensure you have registered an autograd kernel to the correct Autograd key (e.g. DispatchKey::Autograd, DispatchKey::CompositeImplicitAutograd). If your operator is not differentiable, or to squash this warning and use the previous behavior, please register torch::CppFunction::makeFallthrough() to DispatchKey::Autograd. (Triggered internally at ../torch/csrc/autograd/autograd_not_implemented_fallback.cpp:63.)
|
191 |
+
[default2]: return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
|
192 |
+
[default3]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/graph.py:744: UserWarning: c10d::allreduce_: an autograd kernel was not registered to the Autograd key(s) but we are trying to backprop through it. This may lead to silently incorrect behavior. This behavior is deprecated and will be removed in a future version of PyTorch. If your operator is differentiable, please ensure you have registered an autograd kernel to the correct Autograd key (e.g. DispatchKey::Autograd, DispatchKey::CompositeImplicitAutograd). If your operator is not differentiable, or to squash this warning and use the previous behavior, please register torch::CppFunction::makeFallthrough() to DispatchKey::Autograd. (Triggered internally at ../torch/csrc/autograd/autograd_not_implemented_fallback.cpp:63.)
|
193 |
+
[default3]: return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
|
194 |
+
[default4]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/graph.py:744: UserWarning: c10d::allreduce_: an autograd kernel was not registered to the Autograd key(s) but we are trying to backprop through it. This may lead to silently incorrect behavior. This behavior is deprecated and will be removed in a future version of PyTorch. If your operator is differentiable, please ensure you have registered an autograd kernel to the correct Autograd key (e.g. DispatchKey::Autograd, DispatchKey::CompositeImplicitAutograd). If your operator is not differentiable, or to squash this warning and use the previous behavior, please register torch::CppFunction::makeFallthrough() to DispatchKey::Autograd. (Triggered internally at ../torch/csrc/autograd/autograd_not_implemented_fallback.cpp:63.)
|
195 |
+
[default4]: return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
|
196 |
+
[default1]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/graph.py:744: UserWarning: c10d::allreduce_: an autograd kernel was not registered to the Autograd key(s) but we are trying to backprop through it. This may lead to silently incorrect behavior. This behavior is deprecated and will be removed in a future version of PyTorch. If your operator is differentiable, please ensure you have registered an autograd kernel to the correct Autograd key (e.g. DispatchKey::Autograd, DispatchKey::CompositeImplicitAutograd). If your operator is not differentiable, or to squash this warning and use the previous behavior, please register torch::CppFunction::makeFallthrough() to DispatchKey::Autograd. (Triggered internally at ../torch/csrc/autograd/autograd_not_implemented_fallback.cpp:63.)
|
197 |
+
[default1]: return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
|
198 |
+
[default0]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py:2261: UserWarning: torch.distributed.all_reduce_coalesced will be deprecated. If you must use it, please revisit our documentation later at https://pytorch.org/docs/master/distributed.html#collective-functions
|
199 |
+
[default0]: warnings.warn(
|
200 |
+
[default7]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py:2261: UserWarning: torch.distributed.all_reduce_coalesced will be deprecated. If you must use it, please revisit our documentation later at https://pytorch.org/docs/master/distributed.html#collective-functions
|
201 |
+
[default7]: warnings.warn(
|
202 |
+
[default2]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py:2261: UserWarning: torch.distributed.all_reduce_coalesced will be deprecated. If you must use it, please revisit our documentation later at https://pytorch.org/docs/master/distributed.html#collective-functions
|
203 |
+
[default2]: warnings.warn(
|
204 |
+
[default0]:07/04/2024 00:02:15 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Memory usage: 1427.16MiB. Peak allocated 8627.80MiB. Peak reserved: 9340.00MiB
|
205 |
+
[default3]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py:2261: UserWarning: torch.distributed.all_reduce_coalesced will be deprecated. If you must use it, please revisit our documentation later at https://pytorch.org/docs/master/distributed.html#collective-functions
|
206 |
+
[default3]: warnings.warn(
|
207 |
+
[default4]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py:2261: UserWarning: torch.distributed.all_reduce_coalesced will be deprecated. If you must use it, please revisit our documentation later at https://pytorch.org/docs/master/distributed.html#collective-functions
|
208 |
+
[default4]: warnings.warn(
|
209 |
+
[default1]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py:2261: UserWarning: torch.distributed.all_reduce_coalesced will be deprecated. If you must use it, please revisit our documentation later at https://pytorch.org/docs/master/distributed.html#collective-functions
|
210 |
+
[default1]: warnings.warn(
|
211 |
+
[default5]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py:2261: UserWarning: torch.distributed.all_reduce_coalesced will be deprecated. If you must use it, please revisit our documentation later at https://pytorch.org/docs/master/distributed.html#collective-functions
|
212 |
+
[default5]: warnings.warn(
|
213 |
+
[default6]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py:2261: UserWarning: torch.distributed.all_reduce_coalesced will be deprecated. If you must use it, please revisit our documentation later at https://pytorch.org/docs/master/distributed.html#collective-functions
|
214 |
+
[default6]: warnings.warn(
|
215 |
+
[default0]:07/04/2024 00:02:15 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: iteration: 1 / 20 | consumed_tokens: 4.19M | elapsed_time_per_iteration_ms: 44.2K | tokens_per_sec: 95K | tokens_per_sec_per_gpu: 11.9K | global_batch_size: 1.02K | lm_loss: 11.5 | lr: 0.0001 | model_tflops_per_gpu: 108 | hardware_tflops_per_gpu: 108 | grad_norm: 15.7 | cuda_memory_allocated: 2.61G | cuda_max_memory_reserved: 9.79G | hd_total_memory_tb: 312G | hd_used_memory_tb: 66.5G | hd_free_memory_tb: 246G
|
216 |
+
[default0]:07/04/2024 00:02:15 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Memory usage: 2488.00MiB. Peak allocated 2488.00MiB. Peak reserved: 9340.00MiB
|
217 |
+
[default0]:07/04/2024 00:02:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Memory usage: 2488.12MiB. Peak allocated 9689.30MiB. Peak reserved: 10130.00MiB
|
218 |
+
[default0]:07/04/2024 00:02:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: iteration: 2 / 20 | consumed_tokens: 8.39M | elapsed_time_per_iteration_ms: 35K | tokens_per_sec: 120K | tokens_per_sec_per_gpu: 15K | global_batch_size: 1.02K | lm_loss: 11.5 | lr: 9.53e-05 | model_tflops_per_gpu: 136 | hardware_tflops_per_gpu: 136 | grad_norm: 16 | cuda_memory_allocated: 2.61G | cuda_max_memory_reserved: 10.6G | hd_total_memory_tb: 312G | hd_used_memory_tb: 66.5G | hd_free_memory_tb: 246G
|
219 |
+
[default0]:07/04/2024 00:02:50 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Memory usage: 2488.00MiB. Peak allocated 2488.20MiB. Peak reserved: 10130.00MiB
|
220 |
+
[default0]:07/04/2024 00:03:31 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Memory usage: 2488.12MiB. Peak allocated 9689.30MiB. Peak reserved: 10130.00MiB
|
221 |
+
[default0]:07/04/2024 00:03:31 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: iteration: 3 / 20 | consumed_tokens: 12.6M | elapsed_time_per_iteration_ms: 40.5K | tokens_per_sec: 104K | tokens_per_sec_per_gpu: 13K | global_batch_size: 1.02K | lm_loss: 12.8 | lr: 9.05e-05 | model_tflops_per_gpu: 118 | hardware_tflops_per_gpu: 118 | grad_norm: 137 | cuda_memory_allocated: 2.61G | cuda_max_memory_reserved: 10.6G | hd_total_memory_tb: 312G | hd_used_memory_tb: 66.5G | hd_free_memory_tb: 246G
|
222 |
+
[default0]:07/04/2024 00:03:31 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Memory usage: 2488.00MiB. Peak allocated 2488.20MiB. Peak reserved: 10130.00MiB
|
223 |
+
[default0]:STAGE:2024-07-04 00:03:31 364718:364718 ActivityProfilerController.cpp:314] Completed Stage: Warm Up
|
224 |
+
[default0]:07/04/2024 00:04:24 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Memory usage: 2488.12MiB. Peak allocated 9689.30MiB. Peak reserved: 10130.00MiB
|
225 |
+
[default0]:07/04/2024 00:04:24 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: iteration: 4 / 20 | consumed_tokens: 16.8M | elapsed_time_per_iteration_ms: 53.1K | tokens_per_sec: 78.9K | tokens_per_sec_per_gpu: 9.86K | global_batch_size: 1.02K | lm_loss: 12.2 | lr: 8.58e-05 | model_tflops_per_gpu: 89.5 | hardware_tflops_per_gpu: 89.5 | grad_norm: 22.4 | cuda_memory_allocated: 2.61G | cuda_max_memory_reserved: 10.6G | hd_total_memory_tb: 312G | hd_used_memory_tb: 66.5G | hd_free_memory_tb: 246G
|
226 |
+
[default0]:07/04/2024 00:04:24 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Memory usage: 2488.00MiB. Peak allocated 2488.20MiB. Peak reserved: 10130.00MiB
|
227 |
+
[default0]:07/04/2024 00:05:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: iteration: 5 / 20 | consumed_tokens: 21M | elapsed_time_per_iteration_ms: 53.6K | tokens_per_sec: 78.2K | tokens_per_sec_per_gpu: 9.78K | global_batch_size: 1.02K | lm_loss: 12.4 | lr: 8.11e-05 | model_tflops_per_gpu: 88.7 | hardware_tflops_per_gpu: 88.7 | grad_norm: 42.8
|
228 |
+
[default0]:07/04/2024 00:05:18 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: Memory usage: 2488.00MiB. Peak allocated 9689.30MiB. Peak reserved: 10130.00MiB
|
229 |
+
[default0]:07/04/2024 00:06:11 [INFO|DP=0|PP=0|TP=0|ip-26-0-160-225]: iteration: 6 / 20 | consumed_tokens: 25.2M | elapsed_time_per_iteration_ms: 53.7K | tokens_per_sec: 78.1K | tokens_per_sec_per_gpu: 9.76K | global_batch_size: 1.02K | lm_loss: 11.1 | lr: 7.63e-05 | model_tflops_per_gpu: 88.6 | hardware_tflops_per_gpu: 88.6 | grad_norm: 24.8
|
230 |
+
[default0]:STAGE:2024-07-04 00:08:35 364718:364718 ActivityProfilerController.cpp:320] Completed Stage: Collection
|
231 |
+
[default0]:STAGE:2024-07-04 00:08:50 364718:364718 ActivityProfilerController.cpp:324] Completed Stage: Post Processing
|
232 |
+
[default1]:[rank1]:[E ProcessGroupNCCL.cpp:563] [Rank 1] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=305728, OpType=_REDUCE_SCATTER_BASE, NumelIn=33554432, NumelOut=4194304, Timeout(ms)=600000) ran for 600037 milliseconds before timing out.
|
233 |
+
[default5]:[rank5]:[E ProcessGroupNCCL.cpp:563] [Rank 5] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=305728, OpType=_REDUCE_SCATTER_BASE, NumelIn=33554432, NumelOut=4194304, Timeout(ms)=600000) ran for 600001 milliseconds before timing out.
|
234 |
+
[default2]:[rank2]:[E ProcessGroupNCCL.cpp:563] [Rank 2] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=305728, OpType=_REDUCE_SCATTER_BASE, NumelIn=33554432, NumelOut=4194304, Timeout(ms)=600000) ran for 600058 milliseconds before timing out.
|
235 |
+
[default7]:[rank7]:[E ProcessGroupNCCL.cpp:563] [Rank 7] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=305728, OpType=_REDUCE_SCATTER_BASE, NumelIn=33554432, NumelOut=4194304, Timeout(ms)=600000) ran for 600004 milliseconds before timing out.
|
236 |
+
[default4]:[rank4]:[E ProcessGroupNCCL.cpp:563] [Rank 4] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=305728, OpType=_REDUCE_SCATTER_BASE, NumelIn=33554432, NumelOut=4194304, Timeout(ms)=600000) ran for 600051 milliseconds before timing out.
|
237 |
+
[default3]:[rank3]:[E ProcessGroupNCCL.cpp:563] [Rank 3] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=305728, OpType=_REDUCE_SCATTER_BASE, NumelIn=33554432, NumelOut=4194304, Timeout(ms)=600000) ran for 600087 milliseconds before timing out.
|
238 |
+
[default6]:[rank6]:[E ProcessGroupNCCL.cpp:563] [Rank 6] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=305728, OpType=_REDUCE_SCATTER_BASE, NumelIn=33554432, NumelOut=4194304, Timeout(ms)=600000) ran for 600052 milliseconds before timing out.
|
239 |
+
[default2]:[rank2]:[E ProcessGroupNCCL.cpp:1537] [PG 2 Rank 2] Timeout at NCCL work: 305728, last enqueued NCCL work: 305840, last completed NCCL work: 305727.
|
240 |
+
[default2]:[rank2]:[E ProcessGroupNCCL.cpp:577] [Rank 2] Some NCCL operations have failed or timed out. Due to the asynchronous nature of CUDA kernels, subsequent GPU operations might run on corrupted/incomplete data.
|
241 |
+
[default2]:[rank2]:[E ProcessGroupNCCL.cpp:583] [Rank 2] To avoid data inconsistency, we are taking the entire process down.
|
242 |
+
[default2]:[rank2]:[E ProcessGroupNCCL.cpp:1414] [PG 2 Rank 2] Process group watchdog thread terminated with exception: [Rank 2] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=305728, OpType=_REDUCE_SCATTER_BASE, NumelIn=33554432, NumelOut=4194304, Timeout(ms)=600000) ran for 600058 milliseconds before timing out.
|
243 |
+
[default2]:Exception raised from checkTimeout at ../torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp:565 (most recent call first):
|
244 |
+
[default2]:frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7f5dbd50a897 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libc10.so)
|
245 |
+
[default2]:frame #1: c10d::ProcessGroupNCCL::WorkNCCL::checkTimeout(std::optional<std::chrono::duration<long, std::ratio<1l, 1000l> > >) + 0x1d2 (0x7f5dbe7e3c62 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
|
246 |
+
[default2]:frame #2: c10d::ProcessGroupNCCL::watchdogHandler() + 0x1a0 (0x7f5dbe7e8a80 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
|
247 |
+
[default2]:frame #3: c10d::ProcessGroupNCCL::ncclCommWatchdog() + 0x10c (0x7f5dbe7e9dcc in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
|
248 |
+
[default2]:frame #4: <unknown function> + 0xd3e95 (0x7f5e0a282e95 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/../lib/libstdc++.so.6)
|
249 |
+
[default2]:frame #5: <unknown function> + 0x8609 (0x7f5e0f2c9609 in /lib/x86_64-linux-gnu/libpthread.so.0)
|
250 |
+
[default2]:frame #6: clone + 0x43 (0x7f5e0f094353 in /lib/x86_64-linux-gnu/libc.so.6)
|
251 |
+
[default2]:
|
252 |
+
[default2]:terminate called after throwing an instance of 'c10::DistBackendError'
|
253 |
+
[default2]: what(): [PG 2 Rank 2] Process group watchdog thread terminated with exception: [Rank 2] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=305728, OpType=_REDUCE_SCATTER_BASE, NumelIn=33554432, NumelOut=4194304, Timeout(ms)=600000) ran for 600058 milliseconds before timing out.
|
254 |
+
[default2]:Exception raised from checkTimeout at ../torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp:565 (most recent call first):
|
255 |
+
[default2]:frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7f5dbd50a897 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libc10.so)
|
256 |
+
[default2]:frame #1: c10d::ProcessGroupNCCL::WorkNCCL::checkTimeout(std::optional<std::chrono::duration<long, std::ratio<1l, 1000l> > >) + 0x1d2 (0x7f5dbe7e3c62 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
|
257 |
+
[default2]:frame #2: c10d::ProcessGroupNCCL::watchdogHandler() + 0x1a0 (0x7f5dbe7e8a80 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
|
258 |
+
[default2]:frame #3: c10d::ProcessGroupNCCL::ncclCommWatchdog() + 0x10c (0x7f5dbe7e9dcc in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
|
259 |
+
[default2]:frame #4: <unknown function> + 0xd3e95 (0x7f5e0a282e95 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/../lib/libstdc++.so.6)
|
260 |
+
[default2]:frame #5: <unknown function> + 0x8609 (0x7f5e0f2c9609 in /lib/x86_64-linux-gnu/libpthread.so.0)
|
261 |
+
[default2]:frame #6: clone + 0x43 (0x7f5e0f094353 in /lib/x86_64-linux-gnu/libc.so.6)
|
262 |
+
[default2]:
|
263 |
+
[default2]:Exception raised from ncclCommWatchdog at ../torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp:1418 (most recent call first):
|
264 |
+
[default2]:frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7f5dbd50a897 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libc10.so)
[default2]:frame #1: <unknown function> + 0xe32119 (0x7f5dbe46d119 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default2]:frame #2: <unknown function> + 0xd3e95 (0x7f5e0a282e95 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/../lib/libstdc++.so.6)
[default2]:frame #3: <unknown function> + 0x8609 (0x7f5e0f2c9609 in /lib/x86_64-linux-gnu/libpthread.so.0)
[default2]:frame #4: clone + 0x43 (0x7f5e0f094353 in /lib/x86_64-linux-gnu/libc.so.6)
[default2]:
[default7]:[rank7]:[E ProcessGroupNCCL.cpp:1537] [PG 2 Rank 7] Timeout at NCCL work: 305728, last enqueued NCCL work: 305840, last completed NCCL work: 305727.
[default7]:[rank7]:[E ProcessGroupNCCL.cpp:577] [Rank 7] Some NCCL operations have failed or timed out. Due to the asynchronous nature of CUDA kernels, subsequent GPU operations might run on corrupted/incomplete data.
[default7]:[rank7]:[E ProcessGroupNCCL.cpp:583] [Rank 7] To avoid data inconsistency, we are taking the entire process down.
[default7]:[rank7]:[E ProcessGroupNCCL.cpp:1414] [PG 2 Rank 7] Process group watchdog thread terminated with exception: [Rank 7] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=305728, OpType=_REDUCE_SCATTER_BASE, NumelIn=33554432, NumelOut=4194304, Timeout(ms)=600000) ran for 600004 milliseconds before timing out.
[default7]:Exception raised from checkTimeout at ../torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp:565 (most recent call first):
[default7]:frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7f4e91547897 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libc10.so)
[default7]:frame #1: c10d::ProcessGroupNCCL::WorkNCCL::checkTimeout(std::optional<std::chrono::duration<long, std::ratio<1l, 1000l> > >) + 0x1d2 (0x7f4e92820c62 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default7]:frame #2: c10d::ProcessGroupNCCL::watchdogHandler() + 0x1a0 (0x7f4e92825a80 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default7]:frame #3: c10d::ProcessGroupNCCL::ncclCommWatchdog() + 0x10c (0x7f4e92826dcc in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default7]:frame #4: <unknown function> + 0xd3e95 (0x7f4ede2bfe95 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/../lib/libstdc++.so.6)
[default7]:frame #5: <unknown function> + 0x8609 (0x7f4ee3306609 in /lib/x86_64-linux-gnu/libpthread.so.0)
[default7]:frame #6: clone + 0x43 (0x7f4ee30d1353 in /lib/x86_64-linux-gnu/libc.so.6)
[default7]:
[default7]:terminate called after throwing an instance of 'c10::DistBackendError'
[default7]: what(): [PG 2 Rank 7] Process group watchdog thread terminated with exception: [Rank 7] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=305728, OpType=_REDUCE_SCATTER_BASE, NumelIn=33554432, NumelOut=4194304, Timeout(ms)=600000) ran for 600004 milliseconds before timing out.
[default7]:Exception raised from checkTimeout at ../torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp:565 (most recent call first):
[default7]:frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7f4e91547897 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libc10.so)
[default7]:frame #1: c10d::ProcessGroupNCCL::WorkNCCL::checkTimeout(std::optional<std::chrono::duration<long, std::ratio<1l, 1000l> > >) + 0x1d2 (0x7f4e92820c62 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default7]:frame #2: c10d::ProcessGroupNCCL::watchdogHandler() + 0x1a0 (0x7f4e92825a80 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default7]:frame #3: c10d::ProcessGroupNCCL::ncclCommWatchdog() + 0x10c (0x7f4e92826dcc in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default7]:frame #4: <unknown function> + 0xd3e95 (0x7f4ede2bfe95 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/../lib/libstdc++.so.6)
[default7]:frame #5: <unknown function> + 0x8609 (0x7f4ee3306609 in /lib/x86_64-linux-gnu/libpthread.so.0)
[default7]:frame #6: clone + 0x43 (0x7f4ee30d1353 in /lib/x86_64-linux-gnu/libc.so.6)
[default7]:
[default7]:Exception raised from ncclCommWatchdog at ../torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp:1418 (most recent call first):
[default7]:frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7f4e91547897 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libc10.so)
[default7]:frame #1: <unknown function> + 0xe32119 (0x7f4e924aa119 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default7]:frame #2: <unknown function> + 0xd3e95 (0x7f4ede2bfe95 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/../lib/libstdc++.so.6)
[default7]:frame #3: <unknown function> + 0x8609 (0x7f4ee3306609 in /lib/x86_64-linux-gnu/libpthread.so.0)
[default7]:frame #4: clone + 0x43 (0x7f4ee30d1353 in /lib/x86_64-linux-gnu/libc.so.6)
[default7]:
[default4]:[rank4]:[E ProcessGroupNCCL.cpp:1537] [PG 2 Rank 4] Timeout at NCCL work: 305728, last enqueued NCCL work: 305840, last completed NCCL work: 305727.
[default4]:[rank4]:[E ProcessGroupNCCL.cpp:577] [Rank 4] Some NCCL operations have failed or timed out. Due to the asynchronous nature of CUDA kernels, subsequent GPU operations might run on corrupted/incomplete data.
[default3]:[rank3]:[E ProcessGroupNCCL.cpp:1537] [PG 2 Rank 3] Timeout at NCCL work: 305728, last enqueued NCCL work: 305840, last completed NCCL work: 305727.
[default3]:[rank3]:[E ProcessGroupNCCL.cpp:577] [Rank 3] Some NCCL operations have failed or timed out. Due to the asynchronous nature of CUDA kernels, subsequent GPU operations might run on corrupted/incomplete data.
[default4]:[rank4]:[E ProcessGroupNCCL.cpp:583] [Rank 4] To avoid data inconsistency, we are taking the entire process down.
[default4]:[rank4]:[E ProcessGroupNCCL.cpp:1414] [PG 2 Rank 4] Process group watchdog thread terminated with exception: [Rank 4] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=305728, OpType=_REDUCE_SCATTER_BASE, NumelIn=33554432, NumelOut=4194304, Timeout(ms)=600000) ran for 600051 milliseconds before timing out.
[default3]:[rank3]:[E ProcessGroupNCCL.cpp:583] [Rank 3] To avoid data inconsistency, we are taking the entire process down.
[default3]:[rank3]:[E ProcessGroupNCCL.cpp:1414] [PG 2 Rank 3] Process group watchdog thread terminated with exception: [Rank 3] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=305728, OpType=_REDUCE_SCATTER_BASE, NumelIn=33554432, NumelOut=4194304, Timeout(ms)=600000) ran for 600087 milliseconds before timing out.
[default3]:Exception raised from checkTimeout at ../torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp:565 (most recent call first):
[default3]:frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7ffb9df6a897 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libc10.so)
[default3]:frame #1: c10d::ProcessGroupNCCL::WorkNCCL::checkTimeout(std::optional<std::chrono::duration<long, std::ratio<1l, 1000l> > >) + 0x1d2 (0x7ffb9f243c62 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default3]:frame #2: c10d::ProcessGroupNCCL::watchdogHandler() + 0x1a0 (0x7ffb9f248a80 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default3]:frame #3: c10d::ProcessGroupNCCL::ncclCommWatchdog() + 0x10c (0x7ffb9f249dcc in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default3]:frame #4: <unknown function> + 0xd3e95 (0x7ffbeace2e95 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/../lib/libstdc++.so.6)
[default3]:frame #5: <unknown function> + 0x8609 (0x7ffbefd29609 in /lib/x86_64-linux-gnu/libpthread.so.0)
[default3]:frame #6: clone + 0x43 (0x7ffbefaf4353 in /lib/x86_64-linux-gnu/libc.so.6)
[default4]:Exception raised from checkTimeout at ../torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp:565 (most recent call first):
[default4]:frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7f3c0b2c2897 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libc10.so)
[default4]:frame #1: c10d::ProcessGroupNCCL::WorkNCCL::checkTimeout(std::optional<std::chrono::duration<long, std::ratio<1l, 1000l> > >) + 0x1d2 (0x7f3c0c59bc62 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default4]:frame #2: c10d::ProcessGroupNCCL::watchdogHandler() + 0x1a0 (0x7f3c0c5a0a80 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default3]:
[default3]:terminate called after throwing an instance of 'c10::DistBackendError'
[default3]: what(): [PG 2 Rank 3] Process group watchdog thread terminated with exception: [Rank 3] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=305728, OpType=_REDUCE_SCATTER_BASE, NumelIn=33554432, NumelOut=4194304, Timeout(ms)=600000) ran for 600087 milliseconds before timing out.
[default3]:Exception raised from checkTimeout at ../torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp:565 (most recent call first):
[default3]:frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7ffb9df6a897 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libc10.so)
[default3]:frame #1: c10d::ProcessGroupNCCL::WorkNCCL::checkTimeout(std::optional<std::chrono::duration<long, std::ratio<1l, 1000l> > >) + 0x1d2 (0x7ffb9f243c62 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default3]:frame #2: c10d::ProcessGroupNCCL::watchdogHandler() + 0x1a0 (0x7ffb9f248a80 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default3]:frame #3: c10d::ProcessGroupNCCL::ncclCommWatchdog() + 0x10c (0x7ffb9f249dcc in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default3]:frame #4: <unknown function> + 0xd3e95 (0x7ffbeace2e95 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/../lib/libstdc++.so.6)
[default3]:frame #5: <unknown function> + 0x8609 (0x7ffbefd29609 in /lib/x86_64-linux-gnu/libpthread.so.0)
[default3]:frame #6: clone + 0x43 (0x7ffbefaf4353 in /lib/x86_64-linux-gnu/libc.so.6)
[default3]:
[default3]:Exception raised from ncclCommWatchdog at ../torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp:1418 (most recent call first):
[default3]:frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7ffb9df6a897 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libc10.so)
[default3]:frame #1: <unknown function> + 0xe32119 (0x7ffb9eecd119 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default3]:frame #2: <unknown function> + 0xd3e95 (0x7ffbeace2e95 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/../lib/libstdc++.so.6)
[default4]:frame #3: c10d::ProcessGroupNCCL::ncclCommWatchdog() + 0x10c (0x7f3c0c5a1dcc in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default3]:frame #3: <unknown function> + 0x8609 (0x7ffbefd29609 in /lib/x86_64-linux-gnu/libpthread.so.0)
[default3]:frame #4: clone + 0x43 (0x7ffbefaf4353 in /lib/x86_64-linux-gnu/libc.so.6)
[default3]:
[default4]:frame #4: <unknown function> + 0xd3e95 (0x7f3c5803ae95 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/../lib/libstdc++.so.6)
[default4]:frame #5: <unknown function> + 0x8609 (0x7f3c5d081609 in /lib/x86_64-linux-gnu/libpthread.so.0)
[default4]:frame #6: clone + 0x43 (0x7f3c5ce4c353 in /lib/x86_64-linux-gnu/libc.so.6)
[default4]:
[default4]:terminate called after throwing an instance of 'c10::DistBackendError'
[default4]: what(): [PG 2 Rank 4] Process group watchdog thread terminated with exception: [Rank 4] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=305728, OpType=_REDUCE_SCATTER_BASE, NumelIn=33554432, NumelOut=4194304, Timeout(ms)=600000) ran for 600051 milliseconds before timing out.
[default4]:Exception raised from checkTimeout at ../torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp:565 (most recent call first):
[default4]:frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7f3c0b2c2897 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libc10.so)
[default4]:frame #1: c10d::ProcessGroupNCCL::WorkNCCL::checkTimeout(std::optional<std::chrono::duration<long, std::ratio<1l, 1000l> > >) + 0x1d2 (0x7f3c0c59bc62 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default4]:frame #2: c10d::ProcessGroupNCCL::watchdogHandler() + 0x1a0 (0x7f3c0c5a0a80 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default4]:frame #3: c10d::ProcessGroupNCCL::ncclCommWatchdog() + 0x10c (0x7f3c0c5a1dcc in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default4]:frame #4: <unknown function> + 0xd3e95 (0x7f3c5803ae95 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/../lib/libstdc++.so.6)
[default4]:frame #5: <unknown function> + 0x8609 (0x7f3c5d081609 in /lib/x86_64-linux-gnu/libpthread.so.0)
[default4]:frame #6: clone + 0x43 (0x7f3c5ce4c353 in /lib/x86_64-linux-gnu/libc.so.6)
[default4]:
[default4]:Exception raised from ncclCommWatchdog at ../torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp:1418 (most recent call first):
[default4]:frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7f3c0b2c2897 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libc10.so)
[default4]:frame #1: <unknown function> + 0xe32119 (0x7f3c0c225119 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default4]:frame #2: <unknown function> + 0xd3e95 (0x7f3c5803ae95 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/../lib/libstdc++.so.6)
[default4]:frame #3: <unknown function> + 0x8609 (0x7f3c5d081609 in /lib/x86_64-linux-gnu/libpthread.so.0)
[default4]:frame #4: clone + 0x43 (0x7f3c5ce4c353 in /lib/x86_64-linux-gnu/libc.so.6)
[default4]:
[default6]:[rank6]:[E ProcessGroupNCCL.cpp:1537] [PG 2 Rank 6] Timeout at NCCL work: 305728, last enqueued NCCL work: 305840, last completed NCCL work: 305727.
[default6]:[rank6]:[E ProcessGroupNCCL.cpp:577] [Rank 6] Some NCCL operations have failed or timed out. Due to the asynchronous nature of CUDA kernels, subsequent GPU operations might run on corrupted/incomplete data.
[default6]:[rank6]:[E ProcessGroupNCCL.cpp:583] [Rank 6] To avoid data inconsistency, we are taking the entire process down.
[default6]:[rank6]:[E ProcessGroupNCCL.cpp:1414] [PG 2 Rank 6] Process group watchdog thread terminated with exception: [Rank 6] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=305728, OpType=_REDUCE_SCATTER_BASE, NumelIn=33554432, NumelOut=4194304, Timeout(ms)=600000) ran for 600052 milliseconds before timing out.
[default6]:Exception raised from checkTimeout at ../torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp:565 (most recent call first):
[default6]:frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7f471d256897 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libc10.so)
[default6]:frame #1: c10d::ProcessGroupNCCL::WorkNCCL::checkTimeout(std::optional<std::chrono::duration<long, std::ratio<1l, 1000l> > >) + 0x1d2 (0x7f471e52fc62 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default6]:frame #2: c10d::ProcessGroupNCCL::watchdogHandler() + 0x1a0 (0x7f471e534a80 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default6]:frame #3: c10d::ProcessGroupNCCL::ncclCommWatchdog() + 0x10c (0x7f471e535dcc in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default6]:frame #4: <unknown function> + 0xd3e95 (0x7f4769fcee95 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/../lib/libstdc++.so.6)
[default6]:frame #5: <unknown function> + 0x8609 (0x7f476f015609 in /lib/x86_64-linux-gnu/libpthread.so.0)
[default6]:frame #6: clone + 0x43 (0x7f476ede0353 in /lib/x86_64-linux-gnu/libc.so.6)
[default6]:
[default6]:terminate called after throwing an instance of 'c10::DistBackendError'
[default6]: what(): [PG 2 Rank 6] Process group watchdog thread terminated with exception: [Rank 6] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=305728, OpType=_REDUCE_SCATTER_BASE, NumelIn=33554432, NumelOut=4194304, Timeout(ms)=600000) ran for 600052 milliseconds before timing out.
[default6]:Exception raised from checkTimeout at ../torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp:565 (most recent call first):
[default6]:frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7f471d256897 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libc10.so)
[default6]:frame #1: c10d::ProcessGroupNCCL::WorkNCCL::checkTimeout(std::optional<std::chrono::duration<long, std::ratio<1l, 1000l> > >) + 0x1d2 (0x7f471e52fc62 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default6]:frame #2: c10d::ProcessGroupNCCL::watchdogHandler() + 0x1a0 (0x7f471e534a80 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default6]:frame #3: c10d::ProcessGroupNCCL::ncclCommWatchdog() + 0x10c (0x7f471e535dcc in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default6]:frame #4: <unknown function> + 0xd3e95 (0x7f4769fcee95 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/../lib/libstdc++.so.6)
[default6]:frame #5: <unknown function> + 0x8609 (0x7f476f015609 in /lib/x86_64-linux-gnu/libpthread.so.0)
[default6]:frame #6: clone + 0x43 (0x7f476ede0353 in /lib/x86_64-linux-gnu/libc.so.6)
[default6]:
[default6]:Exception raised from ncclCommWatchdog at ../torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp:1418 (most recent call first):
[default6]:frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7f471d256897 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libc10.so)
[default6]:frame #1: <unknown function> + 0xe32119 (0x7f471e1b9119 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default6]:frame #2: <unknown function> + 0xd3e95 (0x7f4769fcee95 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/../lib/libstdc++.so.6)
[default6]:frame #3: <unknown function> + 0x8609 (0x7f476f015609 in /lib/x86_64-linux-gnu/libpthread.so.0)
[default6]:frame #4: clone + 0x43 (0x7f476ede0353 in /lib/x86_64-linux-gnu/libc.so.6)
[default6]:
[default1]:[rank1]:[E ProcessGroupNCCL.cpp:1537] [PG 2 Rank 1] Timeout at NCCL work: 305728, last enqueued NCCL work: 305840, last completed NCCL work: 305727.
[default1]:[rank1]:[E ProcessGroupNCCL.cpp:577] [Rank 1] Some NCCL operations have failed or timed out. Due to the asynchronous nature of CUDA kernels, subsequent GPU operations might run on corrupted/incomplete data.
[default1]:[rank1]:[E ProcessGroupNCCL.cpp:583] [Rank 1] To avoid data inconsistency, we are taking the entire process down.
[default1]:[rank1]:[E ProcessGroupNCCL.cpp:1414] [PG 2 Rank 1] Process group watchdog thread terminated with exception: [Rank 1] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=305728, OpType=_REDUCE_SCATTER_BASE, NumelIn=33554432, NumelOut=4194304, Timeout(ms)=600000) ran for 600037 milliseconds before timing out.
[default1]:Exception raised from checkTimeout at ../torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp:565 (most recent call first):
[default1]:frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7fdff56b0897 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libc10.so)
[default1]:frame #1: c10d::ProcessGroupNCCL::WorkNCCL::checkTimeout(std::optional<std::chrono::duration<long, std::ratio<1l, 1000l> > >) + 0x1d2 (0x7fdff6989c62 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default1]:frame #2: c10d::ProcessGroupNCCL::watchdogHandler() + 0x1a0 (0x7fdff698ea80 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default1]:frame #3: c10d::ProcessGroupNCCL::ncclCommWatchdog() + 0x10c (0x7fdff698fdcc in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default1]:frame #4: <unknown function> + 0xd3e95 (0x7fe042428e95 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/../lib/libstdc++.so.6)
[default1]:frame #5: <unknown function> + 0x8609 (0x7fe04746f609 in /lib/x86_64-linux-gnu/libpthread.so.0)
[default1]:frame #6: clone + 0x43 (0x7fe04723a353 in /lib/x86_64-linux-gnu/libc.so.6)
[default1]:
[default1]:terminate called after throwing an instance of 'c10::DistBackendError'
[default1]: what(): [PG 2 Rank 1] Process group watchdog thread terminated with exception: [Rank 1] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=305728, OpType=_REDUCE_SCATTER_BASE, NumelIn=33554432, NumelOut=4194304, Timeout(ms)=600000) ran for 600037 milliseconds before timing out.
[default1]:Exception raised from checkTimeout at ../torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp:565 (most recent call first):
[default1]:frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7fdff56b0897 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libc10.so)
[default1]:frame #1: c10d::ProcessGroupNCCL::WorkNCCL::checkTimeout(std::optional<std::chrono::duration<long, std::ratio<1l, 1000l> > >) + 0x1d2 (0x7fdff6989c62 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default1]:frame #2: c10d::ProcessGroupNCCL::watchdogHandler() + 0x1a0 (0x7fdff698ea80 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default1]:frame #3: c10d::ProcessGroupNCCL::ncclCommWatchdog() + 0x10c (0x7fdff698fdcc in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default1]:frame #4: <unknown function> + 0xd3e95 (0x7fe042428e95 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/../lib/libstdc++.so.6)
[default1]:frame #5: <unknown function> + 0x8609 (0x7fe04746f609 in /lib/x86_64-linux-gnu/libpthread.so.0)
[default1]:frame #6: clone + 0x43 (0x7fe04723a353 in /lib/x86_64-linux-gnu/libc.so.6)
[default1]:
[default1]:Exception raised from ncclCommWatchdog at ../torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp:1418 (most recent call first):
[default1]:frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7fdff56b0897 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libc10.so)
[default1]:frame #1: <unknown function> + 0xe32119 (0x7fdff6613119 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default1]:frame #2: <unknown function> + 0xd3e95 (0x7fe042428e95 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/../lib/libstdc++.so.6)
[default1]:frame #3: <unknown function> + 0x8609 (0x7fe04746f609 in /lib/x86_64-linux-gnu/libpthread.so.0)
[default1]:frame #4: clone + 0x43 (0x7fe04723a353 in /lib/x86_64-linux-gnu/libc.so.6)
[default1]:
[default5]:[rank5]:[E ProcessGroupNCCL.cpp:1537] [PG 2 Rank 5] Timeout at NCCL work: 305728, last enqueued NCCL work: 305840, last completed NCCL work: 305727.
[default5]:[rank5]:[E ProcessGroupNCCL.cpp:577] [Rank 5] Some NCCL operations have failed or timed out. Due to the asynchronous nature of CUDA kernels, subsequent GPU operations might run on corrupted/incomplete data.
[default5]:[rank5]:[E ProcessGroupNCCL.cpp:583] [Rank 5] To avoid data inconsistency, we are taking the entire process down.
[default5]:[rank5]:[E ProcessGroupNCCL.cpp:1414] [PG 2 Rank 5] Process group watchdog thread terminated with exception: [Rank 5] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=305728, OpType=_REDUCE_SCATTER_BASE, NumelIn=33554432, NumelOut=4194304, Timeout(ms)=600000) ran for 600001 milliseconds before timing out.
[default5]:Exception raised from checkTimeout at ../torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp:565 (most recent call first):
[default5]:frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7fcb26855897 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libc10.so)
[default5]:frame #1: c10d::ProcessGroupNCCL::WorkNCCL::checkTimeout(std::optional<std::chrono::duration<long, std::ratio<1l, 1000l> > >) + 0x1d2 (0x7fcb27b2ec62 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default5]:frame #2: c10d::ProcessGroupNCCL::watchdogHandler() + 0x1a0 (0x7fcb27b33a80 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default5]:frame #3: c10d::ProcessGroupNCCL::ncclCommWatchdog() + 0x10c (0x7fcb27b34dcc in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default5]:frame #4: <unknown function> + 0xd3e95 (0x7fcb735cde95 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/../lib/libstdc++.so.6)
[default5]:frame #5: <unknown function> + 0x8609 (0x7fcb78614609 in /lib/x86_64-linux-gnu/libpthread.so.0)
[default5]:frame #6: clone + 0x43 (0x7fcb783df353 in /lib/x86_64-linux-gnu/libc.so.6)
[default5]:
[default5]:terminate called after throwing an instance of 'c10::DistBackendError'
[default5]: what(): [PG 2 Rank 5] Process group watchdog thread terminated with exception: [Rank 5] Watchdog caught collective operation timeout: WorkNCCL(SeqNum=305728, OpType=_REDUCE_SCATTER_BASE, NumelIn=33554432, NumelOut=4194304, Timeout(ms)=600000) ran for 600001 milliseconds before timing out.
[default5]:Exception raised from checkTimeout at ../torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp:565 (most recent call first):
[default5]:frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7fcb26855897 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libc10.so)
[default5]:frame #1: c10d::ProcessGroupNCCL::WorkNCCL::checkTimeout(std::optional<std::chrono::duration<long, std::ratio<1l, 1000l> > >) + 0x1d2 (0x7fcb27b2ec62 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default5]:frame #2: c10d::ProcessGroupNCCL::watchdogHandler() + 0x1a0 (0x7fcb27b33a80 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default5]:frame #3: c10d::ProcessGroupNCCL::ncclCommWatchdog() + 0x10c (0x7fcb27b34dcc in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default5]:frame #4: <unknown function> + 0xd3e95 (0x7fcb735cde95 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/../lib/libstdc++.so.6)
[default5]:frame #5: <unknown function> + 0x8609 (0x7fcb78614609 in /lib/x86_64-linux-gnu/libpthread.so.0)
[default5]:frame #6: clone + 0x43 (0x7fcb783df353 in /lib/x86_64-linux-gnu/libc.so.6)
[default5]:
[default5]:Exception raised from ncclCommWatchdog at ../torch/csrc/distributed/c10d/ProcessGroupNCCL.cpp:1418 (most recent call first):
[default5]:frame #0: c10::Error::Error(c10::SourceLocation, std::string) + 0x57 (0x7fcb26855897 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libc10.so)
[default5]:frame #1: <unknown function> + 0xe32119 (0x7fcb277b8119 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/lib/libtorch_cuda.so)
[default5]:frame #2: <unknown function> + 0xd3e95 (0x7fcb735cde95 in /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/../lib/libstdc++.so.6)
[default5]:frame #3: <unknown function> + 0x8609 (0x7fcb78614609 in /lib/x86_64-linux-gnu/libpthread.so.0)
[default5]:frame #4: clone + 0x43 (0x7fcb783df353 in /lib/x86_64-linux-gnu/libc.so.6)
[default5]:
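Every rank in the 8-way tensor-parallel group reports the same stalled collective: SeqNum=305728, a _REDUCE_SCATTER_BASE over 33554432 input elements (4194304 out, consistent with TP=8), killed by the watchdog once it exceeded the default Timeout(ms)=600000. A minimal sketch of raising that limit at process-group creation follows; nanotron constructs its own process groups, so this illustrates the underlying PyTorch API rather than the benchmark's actual code:

# Hedged sketch: torchrun is assumed to supply RANK, WORLD_SIZE,
# MASTER_ADDR and MASTER_PORT in the environment.
from datetime import timedelta

import torch.distributed as dist

dist.init_process_group(
    backend="nccl",
    # Default is 10 minutes (the 600000 ms seen above); raising it only
    # helps if the collective is slow rather than genuinely deadlocked.
    timeout=timedelta(minutes=30),
)

Since every surviving rank is stuck on the identical SeqNum, the pattern suggests a single stalled participant rather than mismatched collectives, so a longer timeout would likely only delay the abort here.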
W0704 00:16:42.423000 139891675088704 torch/distributed/elastic/multiprocessing/api.py:851] Sending process 364718 closing signal SIGTERM
E0704 00:16:48.588000 139891675088704 torch/distributed/elastic/multiprocessing/api.py:826] failed (exitcode: -6) local_rank: 1 (pid: 364719) of binary: /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/python3.10
Traceback (most recent call last):
  File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/bin/torchrun", line 8, in <module>
    sys.exit(main())
  File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/elastic/multiprocessing/errors/__init__.py", line 347, in wrapper
    return f(*args, **kwargs)
  File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/run.py", line 879, in main
    run(args)
  File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/run.py", line 870, in run
    elastic_launch(
  File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 132, in __call__
    return launch_agent(self._config, self._entrypoint, list(args))
  File "/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/launcher/api.py", line 263, in launch_agent
    raise ChildFailedError(
torch.distributed.elastic.multiprocessing.errors.ChildFailedError:
============================================================
/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron/run_train.py FAILED
------------------------------------------------------------
Failures:
[1]:
  time      : 2024-07-04_00:16:42
  host      : ip-26-0-160-225.ec2.internal
  rank      : 2 (local_rank: 2)
  exitcode  : -6 (pid: 364720)
  error_file: <N/A>
  traceback : Signal 6 (SIGABRT) received by PID 364720
[2]:
  time      : 2024-07-04_00:16:42
  host      : ip-26-0-160-225.ec2.internal
  rank      : 3 (local_rank: 3)
  exitcode  : -6 (pid: 364721)
  error_file: <N/A>
  traceback : Signal 6 (SIGABRT) received by PID 364721
[3]:
  time      : 2024-07-04_00:16:42
  host      : ip-26-0-160-225.ec2.internal
  rank      : 4 (local_rank: 4)
  exitcode  : -6 (pid: 364722)
  error_file: <N/A>
  traceback : Signal 6 (SIGABRT) received by PID 364722
[4]:
  time      : 2024-07-04_00:16:42
  host      : ip-26-0-160-225.ec2.internal
  rank      : 5 (local_rank: 5)
  exitcode  : -6 (pid: 364723)
  error_file: <N/A>
  traceback : Signal 6 (SIGABRT) received by PID 364723
[5]:
  time      : 2024-07-04_00:16:42
  host      : ip-26-0-160-225.ec2.internal
  rank      : 6 (local_rank: 6)
  exitcode  : -6 (pid: 364724)
  error_file: <N/A>
  traceback : Signal 6 (SIGABRT) received by PID 364724
[6]:
  time      : 2024-07-04_00:16:42
  host      : ip-26-0-160-225.ec2.internal
  rank      : 7 (local_rank: 7)
  exitcode  : -6 (pid: 364725)
  error_file: <N/A>
  traceback : Signal 6 (SIGABRT) received by PID 364725
------------------------------------------------------------
Root Cause (first observed failure):
[0]:
  time      : 2024-07-04_00:16:42
  host      : ip-26-0-160-225.ec2.internal
  rank      : 1 (local_rank: 1)
  exitcode  : -6 (pid: 364719)
  error_file: <N/A>
  traceback : Signal 6 (SIGABRT) received by PID 364719
============================================================
srun: error: ip-26-0-160-225: task 0: Exited with exit code 1
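The elastic agent's summary is consistent with the watchdog aborts above: local ranks 1 through 7 exit with -6 (SIGABRT, raised by each watchdog thread's C++ terminate), the first observed failure is local rank 1, and the remaining process (pid 364718) is sent SIGTERM. If the goal is to trace which rank stalled rather than merely observe the abort, one option, an assumption on our part and not part of this job's setup, is to enable distributed debug output before NCCL initializes, e.g. from the training entrypoint:

import os

# Hedged diagnostic settings; both must be set before torch creates its
# NCCL communicators, and both make the logs substantially noisier.
os.environ["NCCL_DEBUG"] = "INFO"                 # per-communicator NCCL tracing
os.environ["TORCH_DISTRIBUTED_DEBUG"] = "DETAIL"  # extra c10d collective checks

The same two variables could equally be exported in the launching Slurm script alongside the other environment exports.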
Consider using `hf_transfer` for faster uploads. This solution comes with some limitations. See https://huggingface.co/docs/huggingface_hub/hf_transfer for more details.
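The closing hint refers to huggingface_hub's optional Rust-based transfer backend. A sketch of opting in for uploads like this one (assuming `pip install hf_transfer`; the repo id below is a placeholder, not taken from this document):

import os

# The flag must be set before huggingface_hub is imported.
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"

from huggingface_hub import HfApi

HfApi().upload_folder(
    repo_id="<user-or-org>/bench_cluster",  # placeholder repo id
    folder_path="results/llama-1B/8_GPUS/dp-1_tp-8_pp-1_mbz-4",
    repo_type="dataset",
)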
llama-1B/8_GPUS/dp-1_tp-8_pp-1_mbz-4/status.txt
ADDED
@@ -0,0 +1 @@
timeout
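The recorded status, `timeout`, matches the watchdog abort in log.out. A hypothetical classifier along these lines would reproduce it; the actual bench_cluster logic is not shown in this upload:

def classify_run(log_path: str) -> str:
    # Scan the job log for the NCCL watchdog signature first, then for the
    # elastic agent's FAILED banner; otherwise assume the run completed.
    with open(log_path, errors="replace") as f:
        text = f.read()
    if "Watchdog caught collective operation timeout" in text:
        return "timeout"
    if "FAILED" in text:
        return "fail"
    return "completed"

print(classify_run("log.out"))  # -> "timeout" for this run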