Upload llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16
- .gitattributes +1 -0
- llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16/bench.slurm +111 -0
- llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16/config.yaml +90 -0
- llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16/log.out +257 -0
- llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16/log_metrics.csv +21 -0
- llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16/profiler.csv +2 -0
- llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16/profiler/ip-26-0-162-233_1835168.1720046865197689959.pt.trace.json +3 -0
- llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16/status.txt +1 -0
.gitattributes
CHANGED
@@ -109,3 +109,4 @@ llama-1B/8_GPUS/dp-4_tp-2_pp-1_mbz-2/profiler/ip-26-0-174-36_223115.172004284691
 llama-1B/8_GPUS/dp-2_tp-2_pp-2_mbz-4/profiler/ip-26-0-164-207_594258.1720042744576660800.pt.trace.json filter=lfs diff=lfs merge=lfs -text
 llama-1B/8_GPUS/dp-8_tp-1_pp-1_mbz-4/profiler/ip-26-0-160-225_339725.1720044373966110627.pt.trace.json filter=lfs diff=lfs merge=lfs -text
 llama-1B/8_GPUS/dp-2_tp-1_pp-4_mbz-4/profiler/ip-26-0-164-207_605035.1720046681997701534.pt.trace.json filter=lfs diff=lfs merge=lfs -text
+llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16/profiler/ip-26-0-162-233_1835168.1720046865197689959.pt.trace.json filter=lfs diff=lfs merge=lfs -text
llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16/bench.slurm
ADDED
@@ -0,0 +1,111 @@
#!/bin/bash

#SBATCH --job-name=bench_cluster
#SBATCH --time=02:00:00
#SBATCH --partition=hopper-prod
#SBATCH --nodes=1
#SBATCH --gres=gpu:8
#SBATCH --qos=normal
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=96
#SBATCH --exclusive
#SBATCH --output=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16/log.out
#SBATCH --error=/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16/log.out

# Function to update status based on squeue output
update_status() {
    job_id=$1
    status_file=$2
    # For unknown reasons, it doesn't update status for pending. It only works for running
    while true; do
        job_status=$(squeue --job $job_id --noheader --format=%T)
        echo "Job status: $job_status"
        if [ -z "$job_status" ]; then
            # Job has finished or is not found
            break
        elif [ "$job_status" = "RUNNING" ]; then
            printf "running" > $status_file
            break
        fi
        sleep 10
    done
}

# Misc initializations.
echo "========================"
echo "START TIME: $(date)"
source /fsx/ferdinandmom/miniforge3/etc/profile.d/conda.sh
conda activate /fsx/ferdinandmom/miniforge3/envs/env-bench-cluster
echo python3 version = $(python3 --version)
echo "========================"

# Slurm stuff
export HOSTNAMES=$(scontrol show hostnames "$SLURM_JOB_NODELIST")
export MASTER_ADDR=$(scontrol show hostnames "$SLURM_JOB_NODELIST" | head -n 1)
export MASTER_PORT=$((1024 + RANDOM % 64511))

export TMPDIR=/scratch
export HF_DATASETS_CACHE="/admin/home/ferdinand_mom/.cache"
export CUBLAS_WORKSPACE_CONFIG=":4096:8"
export CUDA_DEVICE_MAX_CONNECTIONS="1"

huggingface-cli login --token $HUGGINGFACE_TOKEN


NANOTRON_REPO="/fsx/ferdinandmom/ferdinand-hf/bench_cluster/nanotron"
CMD="$NANOTRON_REPO/run_train.py --config-file /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16/config.yaml"

LAUNCHER="torchrun \
    --nproc_per_node 8 \
    --nnodes 1 \
    --rdzv_endpoint ${MASTER_ADDR}:${MASTER_PORT} \
    --rdzv_backend c10d \
    --max_restarts 0 \
    --tee 3 \
    --node_rank ${SLURM_PROCID}"

# Checkout the bench_cluster branch
cd $NANOTRON_REPO
git checkout bench_cluster
cd ..
# Get the current job ID
job_id=${SLURM_JOB_ID}

# Update status to "pending" or "running" in the background
update_status $job_id /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16/status.txt &

# Run the main command
srun -u $LAUNCHER $CMD
exit_status=$?

# Update status based on the exit status of `srun`
if [ $exit_status -eq 0 ]; then
    printf "completed" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16/status.txt
else
    if grep -q "OutOfMemoryError" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16/log.out; then
        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16/status.txt
    elif grep -q " CUDA error: an illegal memory access" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16/log.out; then
        printf "oom" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16/status.txt
    elif grep -q "Timeout at NCCL" /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16/log.out; then
        printf "timeout" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16/status.txt
    else
        printf "fail" > /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16/status.txt
    fi
fi

# Run the report script if the job completed successfully
if [ $exit_status -eq 0 ]; then
    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16 --is_logs
    python /fsx/ferdinandmom/ferdinand-hf/bench_cluster/main.py report --inp_dir /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16 --is_profiler
fi


# Push to hub the folder using huggingface_cli
huggingface-cli upload nanotron/bench_cluster /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16 llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16 --commit-message "Upload llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16"

# Verify the upload
if [ $? -eq 0 ]; then
    echo "Uploading to Huggingface Hub successful"
else
    echo "Failed to upload to Huggingface Hub"
fi
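Note: after srun exits, the script records the run outcome in status.txt, classifying failures by grepping log.out for known error signatures. A minimal Python sketch of that same classification logic (a hypothetical standalone helper, not part of the repository) could look like this:

```python
from pathlib import Path

# Error signatures mirrored from the grep-based checks in bench.slurm above.
SIGNATURES = [
    ("OutOfMemoryError", "oom"),
    (" CUDA error: an illegal memory access", "oom"),
    ("Timeout at NCCL", "timeout"),
]

def classify_run(log_path: str, exit_status: int) -> str:
    """Return the status string that bench.slurm would write to status.txt."""
    if exit_status == 0:
        return "completed"
    log_text = Path(log_path).read_text(errors="ignore")
    for needle, status in SIGNATURES:
        if needle in log_text:
            return status
    return "fail"

# Example: classify_run("log.out", exit_status=1) -> "oom" | "timeout" | "fail"
```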
llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16/config.yaml
ADDED
@@ -0,0 +1,90 @@
general:
  project: bench_cluster
  seed: 42
model:
  ddp_bucket_cap_mb: 25
  dtype: bfloat16
  init_method:
    std: 0.025
  make_vocab_size_divisible_by: 1
  model_config:
    bos_token_id: 1
    eos_token_id: 2
    hidden_act: silu
    hidden_size: 2048
    initializer_range: 0.02
    intermediate_size: 4096
    is_llama_config: true
    max_position_embeddings: 4096
    num_attention_heads: 32
    num_hidden_layers: 24
    num_key_value_heads: 32
    pad_token_id: null
    pretraining_tp: 1
    rms_norm_eps: 1.0e-05
    rope_scaling: null
    rope_theta: 10000.0
    tie_word_embeddings: true
    use_cache: true
    vocab_size: 50257
optimizer:
  accumulate_grad_in_fp32: true
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 0.0001
    lr_decay_style: linear
    lr_warmup_style: linear
    lr_warmup_steps: 1
    min_decay_lr: 1.0e-05
  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: adamW
    torch_adam_is_fused: true
  weight_decay: 0.01
  zero_stage: 1
parallelism:
  dp: 2
  expert_parallel_size: 1
  pp: 1
  pp_engine: 1f1b
  tp: 4
  tp_linear_async_communication: false
  tp_mode: REDUCE_SCATTER
profiler:
  profiler_export_path: /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16
tokenizer:
  tokenizer_max_length: null
  tokenizer_name_or_path: openai-community/gpt2
  tokenizer_revision: null
data_stages:
- name: Training Stage
  start_training_step: 1
  data:
    dataset:
      dataset_overwrite_cache: false
      dataset_processing_num_proc_per_process: 64
      hf_dataset_config_name: null
      hf_dataset_or_datasets: roneneldan/TinyStories
      hf_dataset_splits: train
      text_column_name: text
    num_loading_workers: 0
    seed: 42
lighteval: null
tokens:
  train_steps: 20
  val_check_interval: -1
  batch_accumulation_per_replica: 32
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 16
  sequence_length: 4096
logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info
checkpoints:
  checkpoint_interval: 100000
  checkpoints_path: /dev/null
  resume_checkpoint_path: null
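Note: with dp: 2, micro_batch_size: 16 and batch_accumulation_per_replica: 32, each optimizer step processes 2 * 16 * 32 = 1024 sequences of 4096 tokens, i.e. about 4.19M tokens per step, matching the global_batch_size and consumed_tokens reported in log.out below. A quick sanity-check sketch (the config path is assumed; PyYAML assumed installed):

```python
import yaml  # pip install pyyaml

with open("config.yaml") as f:  # path assumed; adjust to the results directory
    cfg = yaml.safe_load(f)

dp = cfg["parallelism"]["dp"]
mbs = cfg["tokens"]["micro_batch_size"]
grad_accum = cfg["tokens"]["batch_accumulation_per_replica"]
seq_len = cfg["tokens"]["sequence_length"]

global_batch_size = dp * mbs * grad_accum      # 2 * 16 * 32 = 1024 sequences per step
tokens_per_step = global_batch_size * seq_len  # 1024 * 4096 = 4,194,304 tokens per step
print(global_batch_size, tokens_per_step)
```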
llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16/log.out
ADDED
@@ -0,0 +1,257 @@
========================
START TIME: Wed Jul 3 22:42:51 UTC 2024
python3 version = Python 3.10.14
========================
The token has not been saved to the git credentials helper. Pass `add_to_git_credential=True` in this function directly or `--add-to-git-credential` if using via `huggingface-cli` if you want to set the git credential as well.
Token is valid (permission: write).
Your token has been saved to /admin/home/ferdinand_mom/.cache/huggingface/token
Login successful
Already on 'bench_cluster'
M examples/config_tiny_llama.py
M examples/config_tiny_llama.yaml
M examples/train_tiny_llama.sh
M src/nanotron/models/llama.py
M src/nanotron/trainer.py
Your branch is up to date with 'origin/bench_cluster'.
Job status: RUNNING
W0703 22:42:58.927000 140687439050560 torch/distributed/run.py:757]
W0703 22:42:58.927000 140687439050560 torch/distributed/run.py:757] *****************************************
W0703 22:42:58.927000 140687439050560 torch/distributed/run.py:757] Setting OMP_NUM_THREADS environment variable for each process to be 1 in default, to avoid your system being overloaded, please further tune the variable for optimal performance in your application as needed.
W0703 22:42:58.927000 140687439050560 torch/distributed/run.py:757] *****************************************
[default0]:07/03/2024 22:43:20 [WARNING|DP=0|PP=0|TP=0|ip-26-0-162-233]: [Vocab Size Padding] Padded vocab (size: 50257) with 3 dummy tokens (new size: 50260)
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: Config:
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: Config(general=GeneralArgs(project='bench_cluster',
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: run='%date_%jobid',
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: seed=42,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: step=None,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: consumed_train_samples=None,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: benchmark_csv_path=None,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: ignore_sanity_checks=True),
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: parallelism=ParallelismArgs(dp=2,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: pp=1,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: tp=4,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: pp_engine=<nanotron.parallel.pipeline_parallel.engine.OneForwardOneBackwardPipelineEngine object at 0x7f4cf32dc670>,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: tp_mode=<TensorParallelLinearMode.REDUCE_SCATTER: 2>,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: tp_linear_async_communication=False,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: expert_parallel_size=1),
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: model=ModelArgs(model_config=LlamaConfig(bos_token_id=1,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: eos_token_id=2,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: hidden_act='silu',
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: hidden_size=2048,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: initializer_range=0.02,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: intermediate_size=4096,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: is_llama_config=True,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: max_position_embeddings=4096,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: num_attention_heads=32,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: num_hidden_layers=24,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: num_key_value_heads=32,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: pad_token_id=None,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: pretraining_tp=1,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: rms_norm_eps=1e-05,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: rope_scaling=None,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: rope_theta=10000.0,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: tie_word_embeddings=True,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: use_cache=True,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: vocab_size=50260),
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: init_method=RandomInit(std=0.025),
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: dtype=torch.bfloat16,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: make_vocab_size_divisible_by=1,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: ddp_bucket_cap_mb=25),
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: tokenizer=TokenizerArgs(tokenizer_name_or_path='openai-community/gpt2',
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: tokenizer_revision=None,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: tokenizer_max_length=None),
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: checkpoints=CheckpointsArgs(checkpoints_path=Path('/dev/null'),
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: checkpoint_interval=100000,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: save_initial_state=False,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: resume_checkpoint_path=None,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: checkpoints_path_is_shared_file_system=False),
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: logging=LoggingArgs(log_level='info',
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: log_level_replica='info',
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: iteration_step_info_interval=1),
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: tokens=TokensArgs(sequence_length=4096,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: train_steps=20,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: micro_batch_size=16,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: batch_accumulation_per_replica=32,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: val_check_interval=-1,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: limit_val_batches=0,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: limit_test_batches=0),
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: optimizer=OptimizerArgs(optimizer_factory=AdamWOptimizerArgs(adam_eps=1e-08,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: adam_beta1=0.9,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: adam_beta2=0.95,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: torch_adam_is_fused=True,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: name='adamW'),
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: zero_stage=1,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: weight_decay=0.01,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: clip_grad=1.0,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: accumulate_grad_in_fp32=True,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: learning_rate_scheduler=LRSchedulerArgs(learning_rate=0.0001,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: lr_warmup_steps=1,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: lr_warmup_style='linear',
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: lr_decay_style='linear',
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: lr_decay_steps=19,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: lr_decay_starting_step=None,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: min_decay_lr=1e-05)),
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: data_stages=[DatasetStageArgs(name='Training Stage',
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: start_training_step=1,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: data=DataArgs(dataset=PretrainDatasetsArgs(hf_dataset_or_datasets='roneneldan/TinyStories',
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: hf_dataset_splits='train',
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: hf_dataset_config_name=None,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: dataset_processing_num_proc_per_process=64,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: dataset_overwrite_cache=False,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: text_column_name='text'),
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: seed=42,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: num_loading_workers=0))],
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: profiler=ProfilerArgs(profiler_export_path=Path('/fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16')),
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: lighteval=None)
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: Model Config:
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: LlamaConfig(bos_token_id=1,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: eos_token_id=2,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: hidden_act='silu',
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: hidden_size=2048,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: initializer_range=0.02,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: intermediate_size=4096,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: is_llama_config=True,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: max_position_embeddings=4096,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: num_attention_heads=32,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: num_hidden_layers=24,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: num_key_value_heads=32,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: pad_token_id=None,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: pretraining_tp=1,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: rms_norm_eps=1e-05,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: rope_scaling=None,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: rope_theta=10000.0,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: tie_word_embeddings=True,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: use_cache=True,
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: vocab_size=50260)
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: Building model..
[default0]:07/03/2024 22:43:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: Setting PP block ranks...
[default3]:07/03/2024 22:43:33 [INFO|DP=0|PP=0|TP=3|ip-26-0-162-233]: Local number of parameters: 277M (529.27MiB)
[default3]:07/03/2024 22:43:33 [INFO|DP=0|PP=0|TP=3|ip-26-0-162-233]: [After model building] Memory usage: 554.21MiB. Peak allocated: 606.24MiB Peak reserved: 608.00MiB
[default3]:07/03/2024 22:43:33 [INFO|DP=0|PP=0|TP=3|ip-26-0-162-233]: No checkpoint path provided.
[default0]:07/03/2024 22:43:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: Total number of parameters: 1.11G (2117.09MiB)
[default0]:07/03/2024 22:43:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: Local number of parameters: 277M (529.27MiB)
[default0]:07/03/2024 22:43:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: [After model building] Memory usage: 554.21MiB. Peak allocated: 606.24MiB Peak reserved: 608.00MiB
[default0]:07/03/2024 22:43:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: No checkpoint path provided.
[default0]:07/03/2024 22:43:33 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: Parametrizing model parameters using StandardParametrizator
[default1]:07/03/2024 22:43:33 [INFO|DP=0|PP=0|TP=1|ip-26-0-162-233]: Local number of parameters: 277M (529.27MiB)
[default1]:07/03/2024 22:43:33 [INFO|DP=0|PP=0|TP=1|ip-26-0-162-233]: [After model building] Memory usage: 554.21MiB. Peak allocated: 606.24MiB Peak reserved: 608.00MiB
[default1]:07/03/2024 22:43:33 [INFO|DP=0|PP=0|TP=1|ip-26-0-162-233]: No checkpoint path provided.
[default2]:07/03/2024 22:43:33 [INFO|DP=0|PP=0|TP=2|ip-26-0-162-233]: Local number of parameters: 277M (529.27MiB)
[default2]:07/03/2024 22:43:33 [INFO|DP=0|PP=0|TP=2|ip-26-0-162-233]: [After model building] Memory usage: 554.21MiB. Peak allocated: 606.24MiB Peak reserved: 608.00MiB
[default2]:07/03/2024 22:43:33 [INFO|DP=0|PP=0|TP=2|ip-26-0-162-233]: No checkpoint path provided.
[default6]:07/03/2024 22:43:33 [INFO|DP=1|PP=0|TP=2|ip-26-0-162-233]: No checkpoint path provided.
[default4]:07/03/2024 22:43:33 [INFO|DP=1|PP=0|TP=0|ip-26-0-162-233]: No checkpoint path provided.
[default5]:07/03/2024 22:43:33 [INFO|DP=1|PP=0|TP=1|ip-26-0-162-233]: No checkpoint path provided.
[default7]:07/03/2024 22:43:33 [INFO|DP=1|PP=0|TP=3|ip-26-0-162-233]: No checkpoint path provided.
[default0]:07/03/2024 22:43:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: [Optimizer Building] Using LearningRateForSP as learning rate
[default0]:07/03/2024 22:43:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: [ZeRO sharding] Size of optimizer params per rank:
[default0]:07/03/2024 22:43:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: [ZeRO sharding] DP Rank 0 has 139M out of 277M (50.00%) params' optimizer states
[default0]:07/03/2024 22:43:36 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: [ZeRO sharding] DP Rank 1 has 139M out of 277M (50.00%) params' optimizer states
[default0]:07/03/2024 22:43:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: [Training Plan] Stage Training Stage has 19 remaining training steps and has consumed 0 samples
[default0]:07/03/2024 22:43:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: Using `datasets` library
[default0]:07/03/2024 22:43:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: Loading tokenizer from openai-community/gpt2 and transformers/hf_hub versions ('4.41.2', '0.23.4')
[default0]:Repo card metadata block was not found. Setting CardData to empty.
[default0]:07/03/2024 22:43:38 [WARNING|DP=0|PP=0|TP=0|ip-26-0-162-233]: Repo card metadata block was not found. Setting CardData to empty.
[default0]:07/03/2024 22:43:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: [Training Plan] There are 1 training stages
[default0]:07/03/2024 22:43:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: [Stage Training Stage] start from step 1
[default0]:07/03/2024 22:43:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]:
[default0]:07/03/2024 22:43:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: [Start training] datetime: 2024-07-03 22:43:40.213744 | mbs: 16 | grad_accum: 32 | global_batch_size: 1024 | sequence_length: 4096 | train_steps: 20 | start_iteration_step: 0 | consumed_train_samples: 0
[default0]:07/03/2024 22:43:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: Resuming training from stage Training Stage, it has trained for 0 samples and has 19 remaining train steps
[default0]:07/03/2024 22:43:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: Memory usage: 2142.76MiB. Peak allocated 2142.76MiB. Peak reserved: 2198.00MiB
[default4]:07/03/2024 22:43:40 [WARNING|DP=1|PP=0|TP=0|ip-26-0-162-233]: Repo card metadata block was not found. Setting CardData to empty.
[default2]:Repo card metadata block was not found. Setting CardData to empty.
[default4]:Repo card metadata block was not found. Setting CardData to empty.
[default7]:Repo card metadata block was not found. Setting CardData to empty.
[default5]:07/03/2024 22:43:40 [WARNING|DP=1|PP=0|TP=1|ip-26-0-162-233]: Repo card metadata block was not found. Setting CardData to empty.
[default2]:07/03/2024 22:43:40 [WARNING|DP=0|PP=0|TP=2|ip-26-0-162-233]: Repo card metadata block was not found. Setting CardData to empty.
[default7]:07/03/2024 22:43:40 [WARNING|DP=1|PP=0|TP=3|ip-26-0-162-233]: Repo card metadata block was not found. Setting CardData to empty.
[default5]:Repo card metadata block was not found. Setting CardData to empty.
[default6]:Repo card metadata block was not found. Setting CardData to empty.
[default6]:07/03/2024 22:43:40 [WARNING|DP=1|PP=0|TP=2|ip-26-0-162-233]: Repo card metadata block was not found. Setting CardData to empty.
[default3]:07/03/2024 22:43:40 [WARNING|DP=0|PP=0|TP=3|ip-26-0-162-233]: Repo card metadata block was not found. Setting CardData to empty.
[default1]:Repo card metadata block was not found. Setting CardData to empty.
[default1]:07/03/2024 22:43:40 [WARNING|DP=0|PP=0|TP=1|ip-26-0-162-233]: Repo card metadata block was not found. Setting CardData to empty.
[default3]:Repo card metadata block was not found. Setting CardData to empty.
[default2]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/graph.py:744: UserWarning: c10d::allreduce_: an autograd kernel was not registered to the Autograd key(s) but we are trying to backprop through it. This may lead to silently incorrect behavior. This behavior is deprecated and will be removed in a future version of PyTorch. If your operator is differentiable, please ensure you have registered an autograd kernel to the correct Autograd key (e.g. DispatchKey::Autograd, DispatchKey::CompositeImplicitAutograd). If your operator is not differentiable, or to squash this warning and use the previous behavior, please register torch::CppFunction::makeFallthrough() to DispatchKey::Autograd. (Triggered internally at ../torch/csrc/autograd/autograd_not_implemented_fallback.cpp:63.)
[default2]: return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
[default1]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/graph.py:744: UserWarning: c10d::allreduce_: an autograd kernel was not registered to the Autograd key(s) but we are trying to backprop through it. This may lead to silently incorrect behavior. This behavior is deprecated and will be removed in a future version of PyTorch. If your operator is differentiable, please ensure you have registered an autograd kernel to the correct Autograd key (e.g. DispatchKey::Autograd, DispatchKey::CompositeImplicitAutograd). If your operator is not differentiable, or to squash this warning and use the previous behavior, please register torch::CppFunction::makeFallthrough() to DispatchKey::Autograd. (Triggered internally at ../torch/csrc/autograd/autograd_not_implemented_fallback.cpp:63.)
[default1]: return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
[default3]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/graph.py:744: UserWarning: c10d::allreduce_: an autograd kernel was not registered to the Autograd key(s) but we are trying to backprop through it. This may lead to silently incorrect behavior. This behavior is deprecated and will be removed in a future version of PyTorch. If your operator is differentiable, please ensure you have registered an autograd kernel to the correct Autograd key (e.g. DispatchKey::Autograd, DispatchKey::CompositeImplicitAutograd). If your operator is not differentiable, or to squash this warning and use the previous behavior, please register torch::CppFunction::makeFallthrough() to DispatchKey::Autograd. (Triggered internally at ../torch/csrc/autograd/autograd_not_implemented_fallback.cpp:63.)
[default3]: return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
[default0]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/graph.py:744: UserWarning: c10d::allreduce_: an autograd kernel was not registered to the Autograd key(s) but we are trying to backprop through it. This may lead to silently incorrect behavior. This behavior is deprecated and will be removed in a future version of PyTorch. If your operator is differentiable, please ensure you have registered an autograd kernel to the correct Autograd key (e.g. DispatchKey::Autograd, DispatchKey::CompositeImplicitAutograd). If your operator is not differentiable, or to squash this warning and use the previous behavior, please register torch::CppFunction::makeFallthrough() to DispatchKey::Autograd. (Triggered internally at ../torch/csrc/autograd/autograd_not_implemented_fallback.cpp:63.)
[default0]: return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
[default7]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/graph.py:744: UserWarning: c10d::allreduce_: an autograd kernel was not registered to the Autograd key(s) but we are trying to backprop through it. This may lead to silently incorrect behavior. This behavior is deprecated and will be removed in a future version of PyTorch. If your operator is differentiable, please ensure you have registered an autograd kernel to the correct Autograd key (e.g. DispatchKey::Autograd, DispatchKey::CompositeImplicitAutograd). If your operator is not differentiable, or to squash this warning and use the previous behavior, please register torch::CppFunction::makeFallthrough() to DispatchKey::Autograd. (Triggered internally at ../torch/csrc/autograd/autograd_not_implemented_fallback.cpp:63.)
[default7]: return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
[default4]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/graph.py:744: UserWarning: c10d::allreduce_: an autograd kernel was not registered to the Autograd key(s) but we are trying to backprop through it. This may lead to silently incorrect behavior. This behavior is deprecated and will be removed in a future version of PyTorch. If your operator is differentiable, please ensure you have registered an autograd kernel to the correct Autograd key (e.g. DispatchKey::Autograd, DispatchKey::CompositeImplicitAutograd). If your operator is not differentiable, or to squash this warning and use the previous behavior, please register torch::CppFunction::makeFallthrough() to DispatchKey::Autograd. (Triggered internally at ../torch/csrc/autograd/autograd_not_implemented_fallback.cpp:63.)
[default4]: return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
[default5]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/graph.py:744: UserWarning: c10d::allreduce_: an autograd kernel was not registered to the Autograd key(s) but we are trying to backprop through it. This may lead to silently incorrect behavior. This behavior is deprecated and will be removed in a future version of PyTorch. If your operator is differentiable, please ensure you have registered an autograd kernel to the correct Autograd key (e.g. DispatchKey::Autograd, DispatchKey::CompositeImplicitAutograd). If your operator is not differentiable, or to squash this warning and use the previous behavior, please register torch::CppFunction::makeFallthrough() to DispatchKey::Autograd. (Triggered internally at ../torch/csrc/autograd/autograd_not_implemented_fallback.cpp:63.)
[default5]: return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
[default6]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/autograd/graph.py:744: UserWarning: c10d::allreduce_: an autograd kernel was not registered to the Autograd key(s) but we are trying to backprop through it. This may lead to silently incorrect behavior. This behavior is deprecated and will be removed in a future version of PyTorch. If your operator is differentiable, please ensure you have registered an autograd kernel to the correct Autograd key (e.g. DispatchKey::Autograd, DispatchKey::CompositeImplicitAutograd). If your operator is not differentiable, or to squash this warning and use the previous behavior, please register torch::CppFunction::makeFallthrough() to DispatchKey::Autograd. (Triggered internally at ../torch/csrc/autograd/autograd_not_implemented_fallback.cpp:63.)
[default6]: return Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
[default5]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py:2261: UserWarning: torch.distributed.all_reduce_coalesced will be deprecated. If you must use it, please revisit our documentation later at https://pytorch.org/docs/master/distributed.html#collective-functions
[default5]: warnings.warn(
[default2]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py:2261: UserWarning: torch.distributed.all_reduce_coalesced will be deprecated. If you must use it, please revisit our documentation later at https://pytorch.org/docs/master/distributed.html#collective-functions
[default2]: warnings.warn(
[default3]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py:2261: UserWarning: torch.distributed.all_reduce_coalesced will be deprecated. If you must use it, please revisit our documentation later at https://pytorch.org/docs/master/distributed.html#collective-functions
[default0]:07/03/2024 22:44:03 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: Memory usage: 2219.90MiB. Peak allocated 47278.37MiB. Peak reserved: 48996.00MiB
[default7]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py:2261: UserWarning: torch.distributed.all_reduce_coalesced will be deprecated. If you must use it, please revisit our documentation later at https://pytorch.org/docs/master/distributed.html#collective-functions
[default7]: warnings.warn(
[default3]: warnings.warn(
[default4]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py:2261: UserWarning: torch.distributed.all_reduce_coalesced will be deprecated. If you must use it, please revisit our documentation later at https://pytorch.org/docs/master/distributed.html#collective-functions
[default4]: warnings.warn(
[default1]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py:2261: UserWarning: torch.distributed.all_reduce_coalesced will be deprecated. If you must use it, please revisit our documentation later at https://pytorch.org/docs/master/distributed.html#collective-functions
[default1]: warnings.warn(
[default6]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py:2261: UserWarning: torch.distributed.all_reduce_coalesced will be deprecated. If you must use it, please revisit our documentation later at https://pytorch.org/docs/master/distributed.html#collective-functions
[default6]: warnings.warn(
[default0]:/fsx/ferdinandmom/miniforge3/envs/env-bench-cluster/lib/python3.10/site-packages/torch/distributed/distributed_c10d.py:2261: UserWarning: torch.distributed.all_reduce_coalesced will be deprecated. If you must use it, please revisit our documentation later at https://pytorch.org/docs/master/distributed.html#collective-functions
[default0]: warnings.warn(
[default0]:07/03/2024 22:44:03 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: iteration: 1 / 20 | consumed_tokens: 4.19M | elapsed_time_per_iteration_ms: 23.8K | tokens_per_sec: 177K | tokens_per_sec_per_gpu: 22.1K | global_batch_size: 1.02K | lm_loss: 11.4 | lr: 0.0001 | model_tflops_per_gpu: 200 | hardware_tflops_per_gpu: 200 | grad_norm: 20.6 | cuda_memory_allocated: 3.44G | cuda_max_memory_reserved: 51.4G | hd_total_memory_tb: 312G | hd_used_memory_tb: 66.1G | hd_free_memory_tb: 246G
[default0]:07/03/2024 22:44:03 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: Memory usage: 3280.35MiB. Peak allocated 4603.54MiB. Peak reserved: 48996.00MiB
[default0]:07/03/2024 22:44:21 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: Memory usage: 3280.37MiB. Peak allocated 48338.84MiB. Peak reserved: 48998.00MiB
[default0]:07/03/2024 22:44:22 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: iteration: 2 / 20 | consumed_tokens: 8.39M | elapsed_time_per_iteration_ms: 18.3K | tokens_per_sec: 230K | tokens_per_sec_per_gpu: 28.7K | global_batch_size: 1.02K | lm_loss: 11.4 | lr: 9.53e-05 | model_tflops_per_gpu: 261 | hardware_tflops_per_gpu: 261 | grad_norm: 20.7 | cuda_memory_allocated: 3.44G | cuda_max_memory_reserved: 51.4G | hd_total_memory_tb: 312G | hd_used_memory_tb: 66.1G | hd_free_memory_tb: 246G
[default0]:07/03/2024 22:44:22 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: Memory usage: 3280.35MiB. Peak allocated 4603.55MiB. Peak reserved: 48998.00MiB
[default0]:07/03/2024 22:44:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: Memory usage: 3280.37MiB. Peak allocated 48338.84MiB. Peak reserved: 48998.00MiB
[default0]:07/03/2024 22:44:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: iteration: 3 / 20 | consumed_tokens: 12.6M | elapsed_time_per_iteration_ms: 18.2K | tokens_per_sec: 231K | tokens_per_sec_per_gpu: 28.8K | global_batch_size: 1.02K | lm_loss: 11.6 | lr: 9.05e-05 | model_tflops_per_gpu: 262 | hardware_tflops_per_gpu: 262 | grad_norm: 195 | cuda_memory_allocated: 3.44G | cuda_max_memory_reserved: 51.4G | hd_total_memory_tb: 312G | hd_used_memory_tb: 66.1G | hd_free_memory_tb: 246G
[default0]:07/03/2024 22:44:40 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: Memory usage: 3280.35MiB. Peak allocated 4603.55MiB. Peak reserved: 48998.00MiB
[default0]:STAGE:2024-07-03 22:44:40 1835168:1835168 ActivityProfilerController.cpp:314] Completed Stage: Warm Up
[default0]:07/03/2024 22:44:58 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: Memory usage: 3280.37MiB. Peak allocated 48338.84MiB. Peak reserved: 48998.00MiB
[default0]:07/03/2024 22:44:58 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: iteration: 4 / 20 | consumed_tokens: 16.8M | elapsed_time_per_iteration_ms: 18.2K | tokens_per_sec: 231K | tokens_per_sec_per_gpu: 28.8K | global_batch_size: 1.02K | lm_loss: 13.6 | lr: 8.58e-05 | model_tflops_per_gpu: 262 | hardware_tflops_per_gpu: 262 | grad_norm: 28.1 | cuda_memory_allocated: 3.44G | cuda_max_memory_reserved: 51.4G | hd_total_memory_tb: 312G | hd_used_memory_tb: 66.1G | hd_free_memory_tb: 246G
[default0]:07/03/2024 22:44:58 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: Memory usage: 3280.35MiB. Peak allocated 4603.55MiB. Peak reserved: 48998.00MiB
[default0]:07/03/2024 22:45:16 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: iteration: 5 / 20 | consumed_tokens: 21M | elapsed_time_per_iteration_ms: 18.1K | tokens_per_sec: 231K | tokens_per_sec_per_gpu: 28.9K | global_batch_size: 1.02K | lm_loss: 12 | lr: 8.11e-05 | model_tflops_per_gpu: 262 | hardware_tflops_per_gpu: 262 | grad_norm: 48.8
[default0]:07/03/2024 22:45:16 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: Memory usage: 3280.35MiB. Peak allocated 48338.84MiB. Peak reserved: 48998.00MiB
[default0]:07/03/2024 22:45:34 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: iteration: 6 / 20 | consumed_tokens: 25.2M | elapsed_time_per_iteration_ms: 18.2K | tokens_per_sec: 231K | tokens_per_sec_per_gpu: 28.9K | global_batch_size: 1.02K | lm_loss: 10.9 | lr: 7.63e-05 | model_tflops_per_gpu: 262 | hardware_tflops_per_gpu: 262 | grad_norm: 19.7
[default0]:STAGE:2024-07-03 22:45:53 1835168:1835168 ActivityProfilerController.cpp:320] Completed Stage: Collection
[default0]:STAGE:2024-07-03 22:45:54 1835168:1835168 ActivityProfilerController.cpp:324] Completed Stage: Post Processing
[default0]:07/03/2024 22:48:13 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: Memory usage: 3280.35MiB. Peak allocated 48338.84MiB. Peak reserved: 48998.00MiB
[default0]:07/03/2024 22:48:31 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: iteration: 7 / 20 | consumed_tokens: 29.4M | elapsed_time_per_iteration_ms: 18.1K | tokens_per_sec: 232K | tokens_per_sec_per_gpu: 29K | global_batch_size: 1.02K | lm_loss: 10.4 | lr: 7.16e-05 | model_tflops_per_gpu: 263 | hardware_tflops_per_gpu: 263 | grad_norm: 8.64
[default0]:07/03/2024 22:48:31 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: Memory usage: 3280.35MiB. Peak allocated 48338.84MiB. Peak reserved: 48998.00MiB
[default0]:07/03/2024 22:48:49 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: iteration: 8 / 20 | consumed_tokens: 33.6M | elapsed_time_per_iteration_ms: 18.1K | tokens_per_sec: 232K | tokens_per_sec_per_gpu: 28.9K | global_batch_size: 1.02K | lm_loss: 9.66 | lr: 6.68e-05 | model_tflops_per_gpu: 263 | hardware_tflops_per_gpu: 263 | grad_norm: 6.86
[default0]:07/03/2024 22:48:49 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: Memory usage: 3280.35MiB. Peak allocated 48338.84MiB. Peak reserved: 48998.00MiB
[default0]:07/03/2024 22:49:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: iteration: 9 / 20 | consumed_tokens: 37.7M | elapsed_time_per_iteration_ms: 18.1K | tokens_per_sec: 231K | tokens_per_sec_per_gpu: 28.9K | global_batch_size: 1.02K | lm_loss: 11.2 | lr: 6.21e-05 | model_tflops_per_gpu: 262 | hardware_tflops_per_gpu: 262 | grad_norm: 52.7
[default0]:07/03/2024 22:49:07 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: Memory usage: 3280.35MiB. Peak allocated 48338.84MiB. Peak reserved: 48998.00MiB
[default0]:07/03/2024 22:49:25 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: iteration: 10 / 20 | consumed_tokens: 41.9M | elapsed_time_per_iteration_ms: 18.2K | tokens_per_sec: 231K | tokens_per_sec_per_gpu: 28.9K | global_batch_size: 1.02K | lm_loss: 9.08 | lr: 5.74e-05 | model_tflops_per_gpu: 262 | hardware_tflops_per_gpu: 262 | grad_norm: 15.1
[default0]:07/03/2024 22:49:25 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: Memory usage: 3280.35MiB. Peak allocated 48338.84MiB. Peak reserved: 48998.00MiB
[default0]:07/03/2024 22:49:43 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: iteration: 11 / 20 | consumed_tokens: 46.1M | elapsed_time_per_iteration_ms: 18.2K | tokens_per_sec: 231K | tokens_per_sec_per_gpu: 28.9K | global_batch_size: 1.02K | lm_loss: 8.54 | lr: 5.26e-05 | model_tflops_per_gpu: 262 | hardware_tflops_per_gpu: 262 | grad_norm: 6.74
[default0]:07/03/2024 22:49:43 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: Memory usage: 3280.35MiB. Peak allocated 48338.84MiB. Peak reserved: 48998.00MiB
[default0]:07/03/2024 22:50:02 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: iteration: 12 / 20 | consumed_tokens: 50.3M | elapsed_time_per_iteration_ms: 18.1K | tokens_per_sec: 231K | tokens_per_sec_per_gpu: 28.9K | global_batch_size: 1.02K | lm_loss: 8.34 | lr: 4.79e-05 | model_tflops_per_gpu: 262 | hardware_tflops_per_gpu: 262 | grad_norm: 5.79
[default0]:07/03/2024 22:50:02 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: Memory usage: 3280.35MiB. Peak allocated 48338.84MiB. Peak reserved: 48998.00MiB
[default0]:07/03/2024 22:50:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: iteration: 13 / 20 | consumed_tokens: 54.5M | elapsed_time_per_iteration_ms: 18.2K | tokens_per_sec: 231K | tokens_per_sec_per_gpu: 28.9K | global_batch_size: 1.02K | lm_loss: 8.11 | lr: 4.32e-05 | model_tflops_per_gpu: 262 | hardware_tflops_per_gpu: 262 | grad_norm: 5.58
[default0]:07/03/2024 22:50:20 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: Memory usage: 3280.35MiB. Peak allocated 48338.84MiB. Peak reserved: 48998.00MiB
[default0]:07/03/2024 22:50:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: iteration: 14 / 20 | consumed_tokens: 58.7M | elapsed_time_per_iteration_ms: 18.2K | tokens_per_sec: 231K | tokens_per_sec_per_gpu: 28.9K | global_batch_size: 1.02K | lm_loss: 7.86 | lr: 3.84e-05 | model_tflops_per_gpu: 262 | hardware_tflops_per_gpu: 262 | grad_norm: 5.35
[default0]:07/03/2024 22:50:38 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: Memory usage: 3280.35MiB. Peak allocated 48338.84MiB. Peak reserved: 48998.00MiB
[default0]:07/03/2024 22:50:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: iteration: 15 / 20 | consumed_tokens: 62.9M | elapsed_time_per_iteration_ms: 18.2K | tokens_per_sec: 231K | tokens_per_sec_per_gpu: 28.9K | global_batch_size: 1.02K | lm_loss: 7.64 | lr: 3.37e-05 | model_tflops_per_gpu: 262 | hardware_tflops_per_gpu: 262 | grad_norm: 4.87
[default0]:07/03/2024 22:50:56 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: Memory usage: 3280.35MiB. Peak allocated 48338.84MiB. Peak reserved: 48998.00MiB
[default0]:07/03/2024 22:51:14 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: iteration: 16 / 20 | consumed_tokens: 67.1M | elapsed_time_per_iteration_ms: 18.2K | tokens_per_sec: 231K | tokens_per_sec_per_gpu: 28.9K | global_batch_size: 1.02K | lm_loss: 7.53 | lr: 2.89e-05 | model_tflops_per_gpu: 262 | hardware_tflops_per_gpu: 262 | grad_norm: 5.02
[default0]:07/03/2024 22:51:14 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: Memory usage: 3280.35MiB. Peak allocated 48338.84MiB. Peak reserved: 48998.00MiB
[default0]:07/03/2024 22:51:32 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: iteration: 17 / 20 | consumed_tokens: 71.3M | elapsed_time_per_iteration_ms: 18.2K | tokens_per_sec: 231K | tokens_per_sec_per_gpu: 28.9K | global_batch_size: 1.02K | lm_loss: 7.46 | lr: 2.42e-05 | model_tflops_per_gpu: 262 | hardware_tflops_per_gpu: 262 | grad_norm: 5.63
[default0]:07/03/2024 22:51:32 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: Memory usage: 3280.35MiB. Peak allocated 48338.84MiB. Peak reserved: 48998.00MiB
[default0]:07/03/2024 22:51:51 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: iteration: 18 / 20 | consumed_tokens: 75.5M | elapsed_time_per_iteration_ms: 18.2K | tokens_per_sec: 231K | tokens_per_sec_per_gpu: 28.9K | global_batch_size: 1.02K | lm_loss: 7.33 | lr: 1.95e-05 | model_tflops_per_gpu: 262 | hardware_tflops_per_gpu: 262 | grad_norm: 5.41
[default0]:07/03/2024 22:51:51 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: Memory usage: 3280.35MiB. Peak allocated 48338.84MiB. Peak reserved: 48998.00MiB
[default0]:07/03/2024 22:52:09 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: iteration: 19 / 20 | consumed_tokens: 79.7M | elapsed_time_per_iteration_ms: 18.2K | tokens_per_sec: 230K | tokens_per_sec_per_gpu: 28.7K | global_batch_size: 1.02K | lm_loss: 7.19 | lr: 1.47e-05 | model_tflops_per_gpu: 261 | hardware_tflops_per_gpu: 261 | grad_norm: 3.12
[default0]:07/03/2024 22:52:09 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: Memory usage: 3280.35MiB. Peak allocated 48338.84MiB. Peak reserved: 48998.00MiB
[default0]:07/03/2024 22:52:27 [INFO|DP=0|PP=0|TP=0|ip-26-0-162-233]: iteration: 20 / 20 | consumed_tokens: 83.9M | elapsed_time_per_iteration_ms: 18.3K | tokens_per_sec: 229K | tokens_per_sec_per_gpu: 28.6K | global_batch_size: 1.02K | lm_loss: 7.14 | lr: 1e-05 | model_tflops_per_gpu: 260 | hardware_tflops_per_gpu: 260 | grad_norm: 3.66
Saved 1 csv files over 1 completed logs
Processing file: /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16/profiler/ip-26-0-162-233_1835168.1720046865197689959.pt.trace.json
Results written to /fsx/ferdinandmom/ferdinand-hf/bench_cluster/results/llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16/profiler.csv
Consider using `hf_transfer` for faster uploads. This solution comes with some limitations. See https://huggingface.co/docs/huggingface_hub/hf_transfer for more details.
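Note: the logged throughput is self-consistent. At steady state each iteration consumes about 4.19M tokens in roughly 18.2 s, i.e. around 230K tokens/s for the node and about 28.8K tokens/s per GPU across the 8 GPUs. A small sketch of that cross-check (numbers taken from the log above):

```python
tokens_per_step = 1024 * 4096     # global_batch_size * sequence_length = 4,194,304 tokens
iteration_time_s = 18.2           # steady-state elapsed_time_per_iteration_ms / 1000
n_gpus = 8

tokens_per_sec = tokens_per_step / iteration_time_s   # ~230K, as logged
tokens_per_sec_per_gpu = tokens_per_sec / n_gpus       # ~28.8K, as logged
print(f"{tokens_per_sec:.0f} tok/s total, {tokens_per_sec_per_gpu:.0f} tok/s/GPU")
```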
llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16/log_metrics.csv
ADDED
@@ -0,0 +1,21 @@
iteration,consumed_tokens,elapsed_time_per_iteration_ms,tokens_per_sec,tokens_per_sec_per_gpu,global_batch_size,lm_loss,lr,model_tflops_per_gpu,hardware_tflops_per_gpu,grad_norm,memory_usage_MiB,peak_allocated_MiB,peak_reserved_MiB
1,4190000.0000000005,23800.0,177000.0,22100.0,1020.0,11.4,0.0001,200.0,200.0,20.6,3280.37,48338.84,48998.0
2,8390000.0,18300.0,230000.0,28700.0,1020.0,11.4,9.53e-05,261.0,261.0,20.7,3280.37,48338.84,48998.0
3,12600000.0,18200.0,231000.0,28800.0,1020.0,11.6,9.05e-05,262.0,262.0,195.0,3280.37,48338.84,48998.0
4,16800000.0,18200.0,231000.0,28800.0,1020.0,13.6,8.58e-05,262.0,262.0,28.1,3280.35,4603.55,48998.0
5,21000000.0,18100.0,231000.0,28900.0,1020.0,12.0,8.11e-05,262.0,262.0,48.8,3280.35,48338.84,48998.0
6,25200000.0,18200.0,231000.0,28900.0,1020.0,10.9,7.63e-05,262.0,262.0,19.7,3280.35,48338.84,48998.0
7,29400000.0,18100.0,232000.0,29000.0,1020.0,10.4,7.16e-05,263.0,263.0,8.64,3280.35,48338.84,48998.0
8,33600000.0,18100.0,232000.0,28900.0,1020.0,9.66,6.68e-05,263.0,263.0,6.86,3280.35,48338.84,48998.0
9,37700000.0,18100.0,231000.0,28900.0,1020.0,11.2,6.21e-05,262.0,262.0,52.7,3280.35,48338.84,48998.0
10,41900000.0,18200.0,231000.0,28900.0,1020.0,9.08,5.74e-05,262.0,262.0,15.1,3280.35,48338.84,48998.0
11,46100000.0,18200.0,231000.0,28900.0,1020.0,8.54,5.26e-05,262.0,262.0,6.74,3280.35,48338.84,48998.0
12,50300000.0,18100.0,231000.0,28900.0,1020.0,8.34,4.79e-05,262.0,262.0,5.79,3280.35,48338.84,48998.0
13,54500000.0,18200.0,231000.0,28900.0,1020.0,8.11,4.32e-05,262.0,262.0,5.58,3280.35,48338.84,48998.0
14,58700000.0,18200.0,231000.0,28900.0,1020.0,7.86,3.84e-05,262.0,262.0,5.35,3280.35,48338.84,48998.0
15,62900000.0,18200.0,231000.0,28900.0,1020.0,7.64,3.37e-05,262.0,262.0,4.87,3280.35,48338.84,48998.0
16,67099999.99999999,18200.0,231000.0,28900.0,1020.0,7.53,2.89e-05,262.0,262.0,5.02,3280.35,48338.84,48998.0
17,71300000.0,18200.0,231000.0,28900.0,1020.0,7.46,2.42e-05,262.0,262.0,5.63,3280.35,48338.84,48998.0
18,75500000.0,18200.0,231000.0,28900.0,1020.0,7.33,1.95e-05,262.0,262.0,5.41,3280.35,48338.84,48998.0
19,79700000.0,18200.0,230000.0,28700.0,1020.0,7.19,1.47e-05,261.0,261.0,3.12,3280.35,48338.84,48998.0
20,83900000.0,18300.0,229000.0,28600.0,1020.0,7.14,1e-05,260.0,260.0,3.66,,,
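Note: these per-iteration metrics are the input for comparing this run against the other llama-1B/8_GPUS configurations, and they can be summarized straight from the CSV. A minimal sketch (assumes pandas is installed and the file is read from this results directory):

```python
import pandas as pd

df = pd.read_csv("log_metrics.csv")  # path assumed
steady = df[df["iteration"] >= 3]    # skip the first warm-up iterations
print(steady["tokens_per_sec_per_gpu"].mean())  # roughly 28.9K tokens/s/GPU
print(steady["model_tflops_per_gpu"].mean())    # roughly 262 TFLOPs/GPU
print(df["lm_loss"].iloc[-1])                   # 7.14 after 20 steps
```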
llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16/profiler.csv
ADDED
@@ -0,0 +1,2 @@
forward,backward
1ms 91μs,0ms 486μs
llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16/profiler/ip-26-0-162-233_1835168.1720046865197689959.pt.trace.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fc55f1ebfea96bdde4b53832ea4f4ffd313f44c4b3ad68e2d743605f2a404317
size 4562131238
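Note: the profiler trace is stored through Git LFS (about 4.6 GB), so the repository only holds this pointer file. One way to fetch the actual trace with huggingface_hub (the repo id is taken from the upload command in bench.slurm; the default repo type is assumed):

```python
from huggingface_hub import hf_hub_download

# Repo id from bench.slurm's `huggingface-cli upload nanotron/bench_cluster ...`;
# the repo type is assumed to be the CLI default.
path = hf_hub_download(
    repo_id="nanotron/bench_cluster",
    filename="llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16/profiler/"
             "ip-26-0-162-233_1835168.1720046865197689959.pt.trace.json",
)
print(path)  # a PyTorch Chrome-trace JSON, viewable e.g. in Perfetto or chrome://tracing
```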
llama-1B/8_GPUS/dp-2_tp-4_pp-1_mbz-16/status.txt
ADDED
@@ -0,0 +1 @@
completed