Tristan committed
Commit 9a92e2f
1 Parent(s): 107559b

Training in progress, epoch 0

eval_job_output.txt CHANGED
@@ -1,4 +1,4 @@
- slurm submission log: 2024-05-26 22:30:16.881627
+ slurm submission log: 2024-05-27 12:34:08.291942
  created following sbatch script:

  ###############################
@@ -7,13 +7,13 @@ created following sbatch script:

  #SBATCH --account=nlp
  #SBATCH --cpus-per-task=16
- #SBATCH --dependency=afterok:7653571
+ #SBATCH --dependency=afterok:7656663
  #SBATCH --gres=gpu:1
- #SBATCH --job-name=tthrush-job-1485795
+ #SBATCH --job-name=tthrush-job-3309802
  #SBATCH --mem=60G
  #SBATCH --nodelist=sphinx1
  #SBATCH --open-mode=append
- #SBATCH --output=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection/llms/pythia-70m_sciq_1/eval_job_output.txt
+ #SBATCH --output=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection_big_diff/llms/pythia-70m_sciq_1/eval_job_output.txt
  #SBATCH --partition=sphinx
  #SBATCH --time=14-0

@@ -24,7 +24,7 @@ created following sbatch script:
  cd .

  # launch commands
- srun --unbuffered run_as_child_processes 'lm_eval --model hf --model_args pretrained=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection/llms/pythia-70m_sciq_1,revision=main,dtype=float16,trust_remote_code=True --tasks piqa,arc_easy,xnli_en,xnli_fr,xnli_de,xnli_es,sciq,lambada --device cuda --output_path /juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection/llms/pythia-70m_sciq_1/perf'
+ srun --unbuffered run_as_child_processes 'lm_eval --model hf --model_args pretrained=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection_big_diff/llms/pythia-70m_sciq_1,revision=main,dtype=float16,trust_remote_code=True --tasks piqa,arc_easy,xnli_en,xnli_fr,xnli_de,xnli_es,sciq,lambada --device cuda --output_path /juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection_big_diff/llms/pythia-70m_sciq_1/perf'

  ###############################

@@ -34,55 +34,67 @@ submission to slurm complete!
  ###############################
  slurm submission output

- Submitted batch job 7653572
+ Submitted batch job 7656664



  ###############################

- slurm submission log: 2024-05-26 22:32:57.780747
- created following sbatch script:
-
  ###############################
-
- #!/bin/bash
-
- #SBATCH --account=nlp
- #SBATCH --cpus-per-task=16
- #SBATCH --dependency=afterok:7653601
- #SBATCH --gres=gpu:1
- #SBATCH --job-name=tthrush-job-1570626
- #SBATCH --mem=60G
- #SBATCH --nodelist=sphinx1
- #SBATCH --open-mode=append
- #SBATCH --output=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection/llms/pythia-70m_sciq_1/eval_job_output.txt
- #SBATCH --partition=sphinx
- #SBATCH --time=14-0
-
- # activate your desired anaconda environment
- . /nlp/scr/tthrush/miniconda3/etc/profile.d/conda.sh ; conda activate pretraining-coreset-selection
-
- # cd to working directory
- cd .
-
- # launch commands
- srun --unbuffered run_as_child_processes 'lm_eval --model hf --model_args pretrained=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection/llms/pythia-70m_sciq_1,revision=main,dtype=float16,trust_remote_code=True --tasks piqa,arc_easy,xnli_en,xnli_fr,xnli_de,xnli_es,sciq,lambada --device cuda --output_path /juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection/llms/pythia-70m_sciq_1/perf'
-
+ start time: 2024-05-27 12:47:58.516681
+ machine: sphinx1
+ conda env: pretraining-coreset-selection
  ###############################
+ running following processes

- submission to slurm complete!
+ lm_eval --model hf --model_args pretrained=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection_big_diff/llms/pythia-70m_sciq_1,revision=main,dtype=float16,trust_remote_code=True --tasks piqa,arc_easy,xnli_en,xnli_fr,xnli_de,xnli_es,sciq,lambada --device cuda --output_path /juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection_big_diff/llms/pythia-70m_sciq_1/perf


  ###############################
- slurm submission output
-
- Submitted batch job 7653602
-
+ command outputs:


+ 2024-05-27:12:48:01,194 INFO [utils.py:145] Note: detected 255 virtual cores but NumExpr set to maximum of 64, check "NUMEXPR_MAX_THREADS" environment variable.
+ 2024-05-27:12:48:01,194 INFO [utils.py:148] Note: NumExpr detected 255 cores but "NUMEXPR_MAX_THREADS" not set, so enforcing safe limit of 8.
+ 2024-05-27:12:48:01,194 INFO [utils.py:160] NumExpr defaulting to 8 threads.
+ 2024-05-27:12:48:01,508 INFO [config.py:58] PyTorch version 2.2.2 available.
+ 2024-05-27:12:48:05,349 INFO [__main__.py:156] Verbosity set to INFO
+ 2024-05-27:12:48:12,734 WARNING [__init__.py:194] Some tasks could not be loaded due to missing dependencies. Run with `--verbosity DEBUG` for full details.
+ /nlp/scr/tthrush/miniconda3/envs/pretraining-coreset-selection/lib/python3.10/site-packages/datasets/load.py:1429: FutureWarning: The repository for hails/mmlu_no_train contains custom code which must be executed to correctly load the dataset. You can inspect the repository content at https://hf.co/datasets/hails/mmlu_no_train
+ You can avoid this message in future by passing the argument `trust_remote_code=True`.
+ Passing `trust_remote_code=True` will be mandatory to load this dataset from the next major release of `datasets`.
+   warnings.warn(
+ 2024-05-27:12:49:29,060 WARNING [__init__.py:194] Some tasks could not be loaded due to missing dependencies. Run with `--verbosity DEBUG` for full details.
+ 2024-05-27:12:49:29,064 INFO [__main__.py:229] Selected Tasks: ['arc_easy', 'lambada', 'piqa', 'sciq', 'xnli_de', 'xnli_en', 'xnli_es', 'xnli_fr']
+ 2024-05-27:12:49:29,360 INFO [huggingface.py:148] Using device 'cuda'
+ Traceback (most recent call last):
+   File "/nlp/scr/tthrush/miniconda3/envs/pretraining-coreset-selection/bin/lm_eval", line 8, in <module>
+     sys.exit(cli_evaluate())
+   File "/sailhome/tthrush/lm-evaluation-harness/lm_eval/__main__.py", line 231, in cli_evaluate
+     results = evaluator.simple_evaluate(
+   File "/sailhome/tthrush/lm-evaluation-harness/lm_eval/utils.py", line 415, in _wrapper
+     return fn(*args, **kwargs)
+   File "/sailhome/tthrush/lm-evaluation-harness/lm_eval/evaluator.py", line 98, in simple_evaluate
+     lm = lm_eval.api.registry.get_model(model).create_from_arg_string(
+   File "/sailhome/tthrush/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string
+     return cls(**args, **args2)
+   File "/sailhome/tthrush/lm-evaluation-harness/lm_eval/models/huggingface.py", line 174, in __init__
+     self._get_config(
+   File "/sailhome/tthrush/lm-evaluation-harness/lm_eval/models/huggingface.py", line 420, in _get_config
+     self._config = transformers.AutoConfig.from_pretrained(
+   File "/nlp/scr/tthrush/miniconda3/envs/pretraining-coreset-selection/lib/python3.10/site-packages/transformers/models/auto/configuration_auto.py", line 928, in from_pretrained
+     config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
+   File "/nlp/scr/tthrush/miniconda3/envs/pretraining-coreset-selection/lib/python3.10/site-packages/transformers/configuration_utils.py", line 631, in get_config_dict
+     config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
+   File "/nlp/scr/tthrush/miniconda3/envs/pretraining-coreset-selection/lib/python3.10/site-packages/transformers/configuration_utils.py", line 686, in _get_config_dict
+     resolved_config_file = cached_file(
+   File "/nlp/scr/tthrush/miniconda3/envs/pretraining-coreset-selection/lib/python3.10/site-packages/transformers/utils/hub.py", line 369, in cached_file
+     raise EnvironmentError(
+ OSError: /juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection_big_diff/llms/pythia-70m_sciq_1 does not appear to have a file named config.json. Checkout 'https://huggingface.co//juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection_big_diff/llms/pythia-70m_sciq_1/tree/main' for available files.
  ###############################
-
- slurm submission log: 2024-05-26 22:58:10.061545
+ end time: 2024-05-27 12:49:38.627024
+ elapsed time: 0:01:40.110343
+ slurm submission log: 2024-05-27 23:18:54.461453
  created following sbatch script:

  ###############################
@@ -91,13 +103,13 @@ created following sbatch script:

  #SBATCH --account=nlp
  #SBATCH --cpus-per-task=16
- #SBATCH --dependency=afterok:7653656
+ #SBATCH --dependency=afterok:7659769
  #SBATCH --gres=gpu:1
- #SBATCH --job-name=tthrush-job-2260487
+ #SBATCH --job-name=tthrush-job-2147042
  #SBATCH --mem=60G
  #SBATCH --nodelist=sphinx1
  #SBATCH --open-mode=append
- #SBATCH --output=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection/llms/pythia-70m_sciq_1/eval_job_output.txt
+ #SBATCH --output=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection_big_diff/llms/pythia-70m_sciq_1/eval_job_output.txt
  #SBATCH --partition=sphinx
  #SBATCH --time=14-0

@@ -108,7 +120,7 @@ created following sbatch script:
  cd .

  # launch commands
- srun --unbuffered run_as_child_processes 'lm_eval --model hf --model_args pretrained=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection/llms/pythia-70m_sciq_1,revision=main,dtype=float16,trust_remote_code=True --tasks piqa,arc_easy,xnli_en,xnli_fr,xnli_de,xnli_es,sciq,lambada --device cuda --output_path /juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection/llms/pythia-70m_sciq_1/perf'
+ srun --unbuffered run_as_child_processes 'lm_eval --model hf --model_args pretrained=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection_big_diff/llms/pythia-70m_sciq_1,revision=main,dtype=float16,trust_remote_code=True --tasks piqa,arc_easy,xnli_en,xnli_fr,xnli_de,xnli_es,sciq,lambada --device cuda --output_path /juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection_big_diff/llms/pythia-70m_sciq_1/perf'

  ###############################

@@ -118,13 +130,13 @@ submission to slurm complete!
  ###############################
  slurm submission output

- Submitted batch job 7653657
+ Submitted batch job 7659770



  ###############################

- slurm submission log: 2024-05-26 23:16:43.694904
+ slurm submission log: 2024-05-27 23:24:16.563525
  created following sbatch script:

  ###############################
@@ -133,13 +145,13 @@ created following sbatch script:

  #SBATCH --account=nlp
  #SBATCH --cpus-per-task=16
- #SBATCH --dependency=afterok:7653713
+ #SBATCH --dependency=afterok:7659811
  #SBATCH --gres=gpu:1
- #SBATCH --job-name=tthrush-job-1947317
+ #SBATCH --job-name=tthrush-job-2456365
  #SBATCH --mem=60G
  #SBATCH --nodelist=sphinx1
  #SBATCH --open-mode=append
- #SBATCH --output=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection/llms/pythia-70m_sciq_1/eval_job_output.txt
+ #SBATCH --output=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection_big_diff/llms/pythia-70m_sciq_1/eval_job_output.txt
  #SBATCH --partition=sphinx
  #SBATCH --time=14-0

@@ -150,7 +162,7 @@ created following sbatch script:
  cd .

  # launch commands
- srun --unbuffered run_as_child_processes 'lm_eval --model hf --model_args pretrained=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection/llms/pythia-70m_sciq_1,revision=main,dtype=float16,trust_remote_code=True --tasks piqa,arc_easy,xnli_en,xnli_fr,xnli_de,xnli_es,sciq,lambada --device cuda --output_path /juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection/llms/pythia-70m_sciq_1/perf'
+ srun --unbuffered run_as_child_processes 'lm_eval --model hf --model_args pretrained=/juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection_big_diff/llms/pythia-70m_sciq_1,revision=main,dtype=float16,trust_remote_code=True --tasks piqa,arc_easy,xnli_en,xnli_fr,xnli_de,xnli_es,sciq,lambada --device cuda --output_path /juice5/scr5/tthrush/pretraining-coreset-selection/llm_pretraining/test_ordinal_projection_big_diff/llms/pythia-70m_sciq_1/perf'

  ###############################

@@ -160,7 +172,7 @@ submission to slurm complete!
  ###############################
  slurm submission output

- Submitted batch job 7653714
+ Submitted batch job 7659812


logs/events.out.tfevents.1716911271.sphinx2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4de90193dc0724c97d17ad167ffe10e7294fb99fce2623f40f81de90401dd6e0
+ size 11121
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6f97eeaa124523f4dbf33c42f1b330e4f59310ede161bb1e410a0b9dd0d11762
+ oid sha256:9db3478d5d4b71d3020b911fdba8acdf616efc73e782aad3c13ed3579f2d5633
  size 281715176
train_job_output.txt CHANGED
The diff for this file is too large to render. See raw diff
 
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:04627cb5fbe82be9fcc5f4e83b525aa07f866ab882c470a7103ad6fc1b58ed72
- size 5176
+ oid sha256:1c742f4f4757649b6669d2c7a37926b33e976e9d67c8f290e013c6f1820c0d79
+ size 5240