fxmarty committed
Commit 86506f6
1 Parent(s): 85052bd

Adding regression benchmark for the transformers SHA 408b2b3c5057b275855ae4c43c452a7f0b37aa45

Files changed (38)
  1. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/bert_1gpu_training/0/.config/config.yaml +75 -0
  2. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/bert_1gpu_training/0/.config/hydra.yaml +174 -0
  3. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/bert_1gpu_training/0/.config/overrides.yaml +2 -0
  4. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/bert_1gpu_training/0/experiment.log +15 -0
  5. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/bert_1gpu_training/0/hydra_config.yaml +75 -0
  6. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/bert_1gpu_training/0/training_results.csv +2 -0
  7. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/bert_1gpu_training/1/.config/config.yaml +75 -0
  8. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/bert_1gpu_training/1/.config/hydra.yaml +174 -0
  9. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/bert_1gpu_training/1/.config/overrides.yaml +2 -0
  10. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/bert_1gpu_training/1/experiment.log +14 -0
  11. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/bert_1gpu_training/1/hydra_config.yaml +75 -0
  12. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/bert_1gpu_training/1/training_results.csv +2 -0
  13. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/bert_1gpu_training/multirun.yaml +246 -0
  14. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/0/.config/config.yaml +73 -0
  15. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/0/.config/hydra.yaml +174 -0
  16. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/0/.config/overrides.yaml +2 -0
  17. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/0/experiment.log +25 -0
  18. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/0/hydra_config.yaml +79 -0
  19. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/0/inference_results.csv +2 -0
  20. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/1/.config/config.yaml +73 -0
  21. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/1/.config/hydra.yaml +174 -0
  22. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/1/.config/overrides.yaml +2 -0
  23. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/1/experiment.log +24 -0
  24. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/1/hydra_config.yaml +79 -0
  25. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/1/inference_results.csv +2 -0
  26. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/2/.config/config.yaml +73 -0
  27. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/2/.config/hydra.yaml +174 -0
  28. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/2/.config/overrides.yaml +2 -0
  29. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/2/experiment.log +24 -0
  30. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/2/hydra_config.yaml +79 -0
  31. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/2/inference_results.csv +2 -0
  32. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/3/.config/config.yaml +73 -0
  33. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/3/.config/hydra.yaml +174 -0
  34. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/3/.config/overrides.yaml +2 -0
  35. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/3/experiment.log +24 -0
  36. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/3/hydra_config.yaml +79 -0
  37. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/3/inference_results.csv +2 -0
  38. raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/multirun.yaml +245 -0
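Every experiment above follows the same layout: `raw_results/<commit_date>_<commit_sha>/<experiment_name>/<job_num>/` holds the Hydra config snapshots, an `experiment.log`, and a one-row `*_results.csv`. As a rough sketch of how the tree could be consumed downstream (the `collect_results` helper and the assumption that it runs next to the `raw_results/` directory are illustrative, not part of this commit):

```python
import csv
from pathlib import Path

def collect_results(root: str = "raw_results") -> list[dict]:
    """Collect the one-row result CSV of every sweep job into a list of dicts."""
    rows = []
    # Layout: raw_results/<run>/<experiment>/<job>/{training,inference}_results.csv
    for csv_path in sorted(Path(root).glob("*/*/*/*_results.csv")):
        run, experiment, job = csv_path.parts[-4], csv_path.parts[-3], csv_path.parts[-2]
        with csv_path.open(newline="") as f:
            for row in csv.DictReader(f):
                rows.append({"run": run, "experiment": experiment, "job": job, **row})
    return rows

if __name__ == "__main__":
    for row in collect_results():
        print(row["run"], row["experiment"], row["job"])
```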
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/bert_1gpu_training/0/.config/config.yaml ADDED
@@ -0,0 +1,75 @@
+ backend:
+   name: pytorch
+   version: ${pytorch_version:}
+   _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
+   seed: 42
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: false
+   continous_isolation_check: false
+   delete_cache: false
+   no_weights: false
+   device_map: null
+   torch_dtype: float16
+   disable_grad: ${is_inference:${benchmark.name}}
+   eval_mode: ${is_inference:${benchmark.name}}
+   amp_autocast: false
+   amp_dtype: null
+   torch_compile: false
+   torch_compile_config: {}
+   bettertransformer: false
+   quantization_scheme: null
+   quantization_config: {}
+   use_ddp: false
+   ddp_config: {}
+   peft_strategy: null
+   peft_config: {}
+ benchmark:
+   name: training
+   _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark
+   warmup_steps: 40
+   dataset_shapes:
+     dataset_size: 1500
+     sequence_length: 256
+     num_choices: 1
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   training_arguments:
+     skip_memory_metrics: true
+     output_dir: ./trainer_output
+     use_cpu: ${is_cpu:${device}}
+     ddp_find_unused_parameters: false
+     do_train: true
+     do_eval: false
+     do_predict: false
+     report_to: none
+     per_device_train_batch_size: 32
+ experiment_name: bert_1gpu_training
+ model: bert-base-uncased
+ device: cuda
+ task: text-classification
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.13.1
+   transformers_version: 4.34.0.dev0
+   accelerate_version: 0.23.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' AMD EPYC 7643 48-Core Processor'
+   cpu_count: 96
+   cpu_ram_mb: 1082028
+   gpus:
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/bert_1gpu_training/0/.config/hydra.yaml ADDED
@@ -0,0 +1,174 @@
+ hydra:
+   run:
+     dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
+   sweep:
+     dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
+     subdir: ${hydra.job.num}
+   launcher:
+     _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+   sweeper:
+     _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+     max_batch_size: null
+     params:
+       +benchmark.training_arguments.per_device_train_batch_size: '32'
+       backend.torch_dtype: float16,float32
+   help:
+     app_name: ${hydra.job.name}
+     header: '${hydra.help.app_name} is powered by Hydra.
+
+       '
+     footer: 'Powered by Hydra (https://hydra.cc)
+
+       Use --hydra-help to view Hydra specific help
+
+       '
+     template: '${hydra.help.header}
+
+       == Configuration groups ==
+
+       Compose your configuration from those groups (group=option)
+
+
+       $APP_CONFIG_GROUPS
+
+
+       == Config ==
+
+       Override anything in the config (foo.bar=value)
+
+
+       $CONFIG
+
+
+       ${hydra.help.footer}
+
+       '
+   hydra_help:
+     template: 'Hydra (${hydra.runtime.version})
+
+       See https://hydra.cc for more info.
+
+
+       == Flags ==
+
+       $FLAGS_HELP
+
+
+       == Configuration groups ==
+
+       Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+       to command line)
+
+
+       $HYDRA_CONFIG_GROUPS
+
+
+       Use ''--cfg hydra'' to Show the Hydra config.
+
+       '
+     hydra_help: ???
+   hydra_logging:
+     version: 1
+     formatters:
+       colorlog:
+         (): colorlog.ColoredFormatter
+         format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+     handlers:
+       console:
+         class: logging.StreamHandler
+         formatter: colorlog
+         stream: ext://sys.stdout
+     root:
+       level: INFO
+       handlers:
+       - console
+     disable_existing_loggers: false
+   job_logging:
+     version: 1
+     formatters:
+       simple:
+         format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+       colorlog:
+         (): colorlog.ColoredFormatter
+         format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+           - %(message)s'
+         log_colors:
+           DEBUG: purple
+           INFO: green
+           WARNING: yellow
+           ERROR: red
+           CRITICAL: red
+     handlers:
+       console:
+         class: logging.StreamHandler
+         formatter: colorlog
+         stream: ext://sys.stdout
+       file:
+         class: logging.FileHandler
+         formatter: simple
+         filename: ${hydra.job.name}.log
+     root:
+       level: INFO
+       handlers:
+       - console
+       - file
+     disable_existing_loggers: false
+   env: {}
+   mode: MULTIRUN
+   searchpath: []
+   callbacks: {}
+   output_subdir: .hydra
+   overrides:
+     hydra:
+     - hydra.mode=MULTIRUN
+     task:
+     - +benchmark.training_arguments.per_device_train_batch_size=32
+     - backend.torch_dtype=float16
+   job:
+     name: experiment
+     chdir: true
+     override_dirname: +benchmark.training_arguments.per_device_train_batch_size=32,backend.torch_dtype=float16
+     id: '0'
+     num: 0
+     config_name: bert_1gpu_training
+     env_set: {}
+     env_copy: []
+     config:
+       override_dirname:
+         kv_sep: '='
+         item_sep: ','
+         exclude_keys: []
+   runtime:
+     version: 1.3.2
+     version_base: '1.3'
+     cwd: /home/user/transformers-regression
+     config_sources:
+     - path: hydra.conf
+       schema: pkg
+       provider: hydra
+     - path: optimum_benchmark
+       schema: pkg
+       provider: main
+     - path: hydra_plugins.hydra_colorlog.conf
+       schema: pkg
+       provider: hydra-colorlog
+     - path: /home/user/transformers-regression/configs
+       schema: file
+       provider: command-line
+     - path: ''
+       schema: structured
+       provider: schema
+     output_dir: /home/user/transformers-regression/sweeps/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/bert_1gpu_training/0
+     choices:
+       benchmark: training
+       backend: pytorch
+       hydra/env: default
+       hydra/callbacks: null
+       hydra/job_logging: colorlog
+       hydra/hydra_logging: colorlog
+       hydra/hydra_help: default
+       hydra/help: default
+       hydra/sweeper: basic
+       hydra/launcher: basic
+       hydra/output: default
+   verbose: false
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/bert_1gpu_training/0/.config/overrides.yaml ADDED
@@ -0,0 +1,2 @@
+ - +benchmark.training_arguments.per_device_train_batch_size=32
+ - backend.torch_dtype=float16
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/bert_1gpu_training/0/experiment.log ADDED
@@ -0,0 +1,15 @@
+ [2023-09-27 13:00:27,741][experiment][WARNING] - Multiple GPUs detected but CUDA_DEVICE_ORDER is not set. This means that code might allocate resources from the wrong GPUs even if CUDA_VISIBLE_DEVICES is set. Pytorch uses the `FASTEST_FIRST` order by default, which is not guaranteed to be the same as nvidia-smi. `CUDA_DEVICE_ORDER` will be set to `PCI_BUS_ID` to ensure that the GPUs are allocated in the same order as nvidia-smi.
+ [2023-09-27 13:00:30,281][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert
+ [2023-09-27 13:00:30,281][backend][INFO] - Configuring pytorch backend
+ [2023-09-27 13:00:30,284][pytorch][INFO] - + Loading model on device: cuda
+ [2023-09-27 13:00:31,077][benchmark][INFO] - Configuring training benchmark
+ [2023-09-27 13:00:31,077][training][INFO] - Running training benchmark
+ [2023-09-27 13:00:31,078][dataset_generator][INFO] - Using text-classification task generator
+ [2023-09-27 13:00:31,126][pytorch][INFO] - + Setting dataset format to `torch`.
+ [2023-09-27 13:00:31,127][pytorch][INFO] - + Wrapping training arguments with transformers.TrainingArguments
+ [2023-09-27 13:00:31,128][pytorch][INFO] - + Wrapping model with transformers.Trainer
+ [2023-09-27 13:00:31,132][pytorch][INFO] - + Starting training
+ [2023-09-27 13:00:47,161][pytorch][INFO] - + Training finished successfully
+ [2023-09-27 13:00:47,162][training][INFO] - Saving training results
+ [2023-09-27 13:00:47,165][backend][INFO] - Cleaning pytorch backend
+ [2023-09-27 13:00:47,165][backend][INFO] - + Deleting pretrained model
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/bert_1gpu_training/0/hydra_config.yaml ADDED
@@ -0,0 +1,75 @@
+ backend:
+   name: pytorch
+   version: 2.1.0+rocm5.6
+   _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
+   seed: 42
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: false
+   continous_isolation_check: false
+   delete_cache: false
+   no_weights: false
+   device_map: null
+   torch_dtype: float16
+   disable_grad: false
+   eval_mode: false
+   amp_autocast: false
+   amp_dtype: null
+   torch_compile: false
+   torch_compile_config: {}
+   bettertransformer: false
+   quantization_scheme: null
+   quantization_config: {}
+   use_ddp: false
+   ddp_config: {}
+   peft_strategy: null
+   peft_config: {}
+ benchmark:
+   name: training
+   _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark
+   warmup_steps: 40
+   dataset_shapes:
+     dataset_size: 1500
+     sequence_length: 256
+     num_choices: 1
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   training_arguments:
+     skip_memory_metrics: true
+     output_dir: ./trainer_output
+     use_cpu: false
+     ddp_find_unused_parameters: false
+     do_train: true
+     do_eval: false
+     do_predict: false
+     report_to: none
+     per_device_train_batch_size: 32
+ experiment_name: bert_1gpu_training
+ model: bert-base-uncased
+ device: cuda
+ task: text-classification
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.13.1
+   transformers_version: 4.34.0.dev0
+   accelerate_version: 0.23.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' AMD EPYC 7643 48-Core Processor'
+   cpu_count: 96
+   cpu_ram_mb: 1082028
+   gpus:
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/bert_1gpu_training/0/training_results.csv ADDED
@@ -0,0 +1,2 @@
+ warmup.runtime(s),warmup.throughput(samples/s),training.runtime(s),training.throughput(samples/s),overall_training.runtime(s),overall_training.throughput(samples/s)
+ 4.883371829986572,262.11397463942853,11.04150652885437,292.71367920255545,15.924879789352417,202.952866379623
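For reference, this row appears internally consistent: warmup plus training runtime adds up to the overall runtime, and with `warmup_steps: 40` at `per_device_train_batch_size: 32` the warmup throughput comes out to 40 × 32 / 4.883 ≈ 262 samples/s. A quick sanity check with the literals copied from the CSV above (the check itself is illustrative, not part of optimum-benchmark):

```python
# Sanity check over the training_results.csv row above (literals copied from the diff).
warmup_runtime, warmup_throughput = 4.883371829986572, 262.11397463942853
training_runtime = 11.04150652885437
overall_runtime = 15.924879789352417
warmup_steps, batch_size = 40, 32  # warmup_steps / per_device_train_batch_size from the config

# The overall runtime is the warmup runtime plus the training runtime.
assert abs((warmup_runtime + training_runtime) - overall_runtime) < 1e-4

# Warmup throughput is warmup samples (steps * batch size) over the warmup runtime.
assert abs(warmup_steps * batch_size / warmup_runtime - warmup_throughput) < 1e-3
print("training_results.csv row is internally consistent")
```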
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/bert_1gpu_training/1/.config/config.yaml ADDED
@@ -0,0 +1,75 @@
+ backend:
+   name: pytorch
+   version: ${pytorch_version:}
+   _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
+   seed: 42
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: false
+   continous_isolation_check: false
+   delete_cache: false
+   no_weights: false
+   device_map: null
+   torch_dtype: float32
+   disable_grad: ${is_inference:${benchmark.name}}
+   eval_mode: ${is_inference:${benchmark.name}}
+   amp_autocast: false
+   amp_dtype: null
+   torch_compile: false
+   torch_compile_config: {}
+   bettertransformer: false
+   quantization_scheme: null
+   quantization_config: {}
+   use_ddp: false
+   ddp_config: {}
+   peft_strategy: null
+   peft_config: {}
+ benchmark:
+   name: training
+   _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark
+   warmup_steps: 40
+   dataset_shapes:
+     dataset_size: 1500
+     sequence_length: 256
+     num_choices: 1
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   training_arguments:
+     skip_memory_metrics: true
+     output_dir: ./trainer_output
+     use_cpu: ${is_cpu:${device}}
+     ddp_find_unused_parameters: false
+     do_train: true
+     do_eval: false
+     do_predict: false
+     report_to: none
+     per_device_train_batch_size: 32
+ experiment_name: bert_1gpu_training
+ model: bert-base-uncased
+ device: cuda
+ task: text-classification
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.13.1
+   transformers_version: 4.34.0.dev0
+   accelerate_version: 0.23.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' AMD EPYC 7643 48-Core Processor'
+   cpu_count: 96
+   cpu_ram_mb: 1082028
+   gpus:
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/bert_1gpu_training/1/.config/hydra.yaml ADDED
@@ -0,0 +1,174 @@
+ hydra:
+   run:
+     dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
+   sweep:
+     dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
+     subdir: ${hydra.job.num}
+   launcher:
+     _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+   sweeper:
+     _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+     max_batch_size: null
+     params:
+       +benchmark.training_arguments.per_device_train_batch_size: '32'
+       backend.torch_dtype: float16,float32
+   help:
+     app_name: ${hydra.job.name}
+     header: '${hydra.help.app_name} is powered by Hydra.
+
+       '
+     footer: 'Powered by Hydra (https://hydra.cc)
+
+       Use --hydra-help to view Hydra specific help
+
+       '
+     template: '${hydra.help.header}
+
+       == Configuration groups ==
+
+       Compose your configuration from those groups (group=option)
+
+
+       $APP_CONFIG_GROUPS
+
+
+       == Config ==
+
+       Override anything in the config (foo.bar=value)
+
+
+       $CONFIG
+
+
+       ${hydra.help.footer}
+
+       '
+   hydra_help:
+     template: 'Hydra (${hydra.runtime.version})
+
+       See https://hydra.cc for more info.
+
+
+       == Flags ==
+
+       $FLAGS_HELP
+
+
+       == Configuration groups ==
+
+       Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+       to command line)
+
+
+       $HYDRA_CONFIG_GROUPS
+
+
+       Use ''--cfg hydra'' to Show the Hydra config.
+
+       '
+     hydra_help: ???
+   hydra_logging:
+     version: 1
+     formatters:
+       colorlog:
+         (): colorlog.ColoredFormatter
+         format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+     handlers:
+       console:
+         class: logging.StreamHandler
+         formatter: colorlog
+         stream: ext://sys.stdout
+     root:
+       level: INFO
+       handlers:
+       - console
+     disable_existing_loggers: false
+   job_logging:
+     version: 1
+     formatters:
+       simple:
+         format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+       colorlog:
+         (): colorlog.ColoredFormatter
+         format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+           - %(message)s'
+         log_colors:
+           DEBUG: purple
+           INFO: green
+           WARNING: yellow
+           ERROR: red
+           CRITICAL: red
+     handlers:
+       console:
+         class: logging.StreamHandler
+         formatter: colorlog
+         stream: ext://sys.stdout
+       file:
+         class: logging.FileHandler
+         formatter: simple
+         filename: ${hydra.job.name}.log
+     root:
+       level: INFO
+       handlers:
+       - console
+       - file
+     disable_existing_loggers: false
+   env: {}
+   mode: MULTIRUN
+   searchpath: []
+   callbacks: {}
+   output_subdir: .hydra
+   overrides:
+     hydra:
+     - hydra.mode=MULTIRUN
+     task:
+     - +benchmark.training_arguments.per_device_train_batch_size=32
+     - backend.torch_dtype=float32
+   job:
+     name: experiment
+     chdir: true
+     override_dirname: +benchmark.training_arguments.per_device_train_batch_size=32,backend.torch_dtype=float32
+     id: '1'
+     num: 1
+     config_name: bert_1gpu_training
+     env_set: {}
+     env_copy: []
+     config:
+       override_dirname:
+         kv_sep: '='
+         item_sep: ','
+         exclude_keys: []
+   runtime:
+     version: 1.3.2
+     version_base: '1.3'
+     cwd: /home/user/transformers-regression
+     config_sources:
+     - path: hydra.conf
+       schema: pkg
+       provider: hydra
+     - path: optimum_benchmark
+       schema: pkg
+       provider: main
+     - path: hydra_plugins.hydra_colorlog.conf
+       schema: pkg
+       provider: hydra-colorlog
+     - path: /home/user/transformers-regression/configs
+       schema: file
+       provider: command-line
+     - path: ''
+       schema: structured
+       provider: schema
+     output_dir: /home/user/transformers-regression/sweeps/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/bert_1gpu_training/1
+     choices:
+       benchmark: training
+       backend: pytorch
+       hydra/env: default
+       hydra/callbacks: null
+       hydra/job_logging: colorlog
+       hydra/hydra_logging: colorlog
+       hydra/hydra_help: default
+       hydra/help: default
+       hydra/sweeper: basic
+       hydra/launcher: basic
+       hydra/output: default
+   verbose: false
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/bert_1gpu_training/1/.config/overrides.yaml ADDED
@@ -0,0 +1,2 @@
+ - +benchmark.training_arguments.per_device_train_batch_size=32
+ - backend.torch_dtype=float32
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/bert_1gpu_training/1/experiment.log ADDED
@@ -0,0 +1,14 @@
+ [2023-09-27 13:00:48,856][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert
+ [2023-09-27 13:00:48,856][backend][INFO] - Configuring pytorch backend
+ [2023-09-27 13:00:48,857][pytorch][INFO] - + Loading model on device: cuda
+ [2023-09-27 13:00:49,308][benchmark][INFO] - Configuring training benchmark
+ [2023-09-27 13:00:49,308][training][INFO] - Running training benchmark
+ [2023-09-27 13:00:49,308][dataset_generator][INFO] - Using text-classification task generator
+ [2023-09-27 13:00:49,336][pytorch][INFO] - + Setting dataset format to `torch`.
+ [2023-09-27 13:00:49,337][pytorch][INFO] - + Wrapping training arguments with transformers.TrainingArguments
+ [2023-09-27 13:00:49,337][pytorch][INFO] - + Wrapping model with transformers.Trainer
+ [2023-09-27 13:00:49,341][pytorch][INFO] - + Starting training
+ [2023-09-27 13:01:22,930][pytorch][INFO] - + Training finished successfully
+ [2023-09-27 13:01:22,931][training][INFO] - Saving training results
+ [2023-09-27 13:01:22,932][backend][INFO] - Cleaning pytorch backend
+ [2023-09-27 13:01:22,933][backend][INFO] - + Deleting pretrained model
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/bert_1gpu_training/1/hydra_config.yaml ADDED
@@ -0,0 +1,75 @@
+ backend:
+   name: pytorch
+   version: 2.1.0+rocm5.6
+   _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
+   seed: 42
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: false
+   continous_isolation_check: false
+   delete_cache: false
+   no_weights: false
+   device_map: null
+   torch_dtype: float32
+   disable_grad: false
+   eval_mode: false
+   amp_autocast: false
+   amp_dtype: null
+   torch_compile: false
+   torch_compile_config: {}
+   bettertransformer: false
+   quantization_scheme: null
+   quantization_config: {}
+   use_ddp: false
+   ddp_config: {}
+   peft_strategy: null
+   peft_config: {}
+ benchmark:
+   name: training
+   _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark
+   warmup_steps: 40
+   dataset_shapes:
+     dataset_size: 1500
+     sequence_length: 256
+     num_choices: 1
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   training_arguments:
+     skip_memory_metrics: true
+     output_dir: ./trainer_output
+     use_cpu: false
+     ddp_find_unused_parameters: false
+     do_train: true
+     do_eval: false
+     do_predict: false
+     report_to: none
+     per_device_train_batch_size: 32
+ experiment_name: bert_1gpu_training
+ model: bert-base-uncased
+ device: cuda
+ task: text-classification
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.13.1
+   transformers_version: 4.34.0.dev0
+   accelerate_version: 0.23.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' AMD EPYC 7643 48-Core Processor'
+   cpu_count: 96
+   cpu_ram_mb: 1082028
+   gpus:
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/bert_1gpu_training/1/training_results.csv ADDED
@@ -0,0 +1,2 @@
+ warmup.runtime(s),warmup.throughput(samples/s),training.runtime(s),training.throughput(samples/s),overall_training.runtime(s),overall_training.throughput(samples/s)
+ 9.614588975906372,133.1310161263897,23.879708766937256,135.34503421058795,33.494298458099365,96.4940347696239
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/bert_1gpu_training/multirun.yaml ADDED
@@ -0,0 +1,246 @@
+ hydra:
+   run:
+     dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
+   sweep:
+     dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
+     subdir: ${hydra.job.num}
+   launcher:
+     _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+   sweeper:
+     _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+     max_batch_size: null
+     params:
+       +benchmark.training_arguments.per_device_train_batch_size: '32'
+       backend.torch_dtype: float16,float32
+   help:
+     app_name: ${hydra.job.name}
+     header: '${hydra.help.app_name} is powered by Hydra.
+
+       '
+     footer: 'Powered by Hydra (https://hydra.cc)
+
+       Use --hydra-help to view Hydra specific help
+
+       '
+     template: '${hydra.help.header}
+
+       == Configuration groups ==
+
+       Compose your configuration from those groups (group=option)
+
+
+       $APP_CONFIG_GROUPS
+
+
+       == Config ==
+
+       Override anything in the config (foo.bar=value)
+
+
+       $CONFIG
+
+
+       ${hydra.help.footer}
+
+       '
+   hydra_help:
+     template: 'Hydra (${hydra.runtime.version})
+
+       See https://hydra.cc for more info.
+
+
+       == Flags ==
+
+       $FLAGS_HELP
+
+
+       == Configuration groups ==
+
+       Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+       to command line)
+
+
+       $HYDRA_CONFIG_GROUPS
+
+
+       Use ''--cfg hydra'' to Show the Hydra config.
+
+       '
+     hydra_help: ???
+   hydra_logging:
+     version: 1
+     formatters:
+       colorlog:
+         (): colorlog.ColoredFormatter
+         format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+     handlers:
+       console:
+         class: logging.StreamHandler
+         formatter: colorlog
+         stream: ext://sys.stdout
+     root:
+       level: INFO
+       handlers:
+       - console
+     disable_existing_loggers: false
+   job_logging:
+     version: 1
+     formatters:
+       simple:
+         format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+       colorlog:
+         (): colorlog.ColoredFormatter
+         format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+           - %(message)s'
+         log_colors:
+           DEBUG: purple
+           INFO: green
+           WARNING: yellow
+           ERROR: red
+           CRITICAL: red
+     handlers:
+       console:
+         class: logging.StreamHandler
+         formatter: colorlog
+         stream: ext://sys.stdout
+       file:
+         class: logging.FileHandler
+         formatter: simple
+         filename: ${hydra.job.name}.log
+     root:
+       level: INFO
+       handlers:
+       - console
+       - file
+     disable_existing_loggers: false
+   env: {}
+   mode: MULTIRUN
+   searchpath: []
+   callbacks: {}
+   output_subdir: .hydra
+   overrides:
+     hydra:
+     - hydra.mode=MULTIRUN
+     task: []
+   job:
+     name: experiment
+     chdir: true
+     override_dirname: ''
+     id: ???
+     num: ???
+     config_name: bert_1gpu_training
+     env_set: {}
+     env_copy: []
+     config:
+       override_dirname:
+         kv_sep: '='
+         item_sep: ','
+         exclude_keys: []
+   runtime:
+     version: 1.3.2
+     version_base: '1.3'
+     cwd: /home/user/transformers-regression
+     config_sources:
+     - path: hydra.conf
+       schema: pkg
+       provider: hydra
+     - path: optimum_benchmark
+       schema: pkg
+       provider: main
+     - path: hydra_plugins.hydra_colorlog.conf
+       schema: pkg
+       provider: hydra-colorlog
+     - path: /home/user/transformers-regression/configs
+       schema: file
+       provider: command-line
+     - path: ''
+       schema: structured
+       provider: schema
+     output_dir: ???
+     choices:
+       benchmark: training
+       backend: pytorch
+       hydra/env: default
+       hydra/callbacks: null
+       hydra/job_logging: colorlog
+       hydra/hydra_logging: colorlog
+       hydra/hydra_help: default
+       hydra/help: default
+       hydra/sweeper: basic
+       hydra/launcher: basic
+       hydra/output: default
+   verbose: false
+ backend:
+   name: pytorch
+   version: ${pytorch_version:}
+   _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
+   seed: 42
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: false
+   continous_isolation_check: false
+   delete_cache: false
+   no_weights: false
+   device_map: null
+   torch_dtype: null
+   disable_grad: ${is_inference:${benchmark.name}}
+   eval_mode: ${is_inference:${benchmark.name}}
+   amp_autocast: false
+   amp_dtype: null
+   torch_compile: false
+   torch_compile_config: {}
+   bettertransformer: false
+   quantization_scheme: null
+   quantization_config: {}
+   use_ddp: false
+   ddp_config: {}
+   peft_strategy: null
+   peft_config: {}
+ benchmark:
+   name: training
+   _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark
+   warmup_steps: 40
+   dataset_shapes:
+     dataset_size: 1500
+     sequence_length: 256
+     num_choices: 1
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   training_arguments:
+     skip_memory_metrics: true
+     output_dir: ./trainer_output
+     use_cpu: ${is_cpu:${device}}
+     ddp_find_unused_parameters: false
+     do_train: true
+     do_eval: false
+     do_predict: false
+     report_to: none
+ experiment_name: bert_1gpu_training
+ model: bert-base-uncased
+ device: cuda
+ task: text-classification
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.13.1
+   transformers_version: 4.34.0.dev0
+   accelerate_version: 0.23.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' AMD EPYC 7643 48-Core Processor'
+   cpu_count: 96
+   cpu_ram_mb: 1082028
+   gpus:
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/0/.config/config.yaml ADDED
@@ -0,0 +1,73 @@
+ backend:
+   name: pytorch
+   version: ${pytorch_version:}
+   _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
+   seed: 42
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: false
+   continous_isolation_check: false
+   delete_cache: false
+   no_weights: false
+   device_map: null
+   torch_dtype: float16
+   disable_grad: ${is_inference:${benchmark.name}}
+   eval_mode: ${is_inference:${benchmark.name}}
+   amp_autocast: false
+   amp_dtype: null
+   torch_compile: false
+   torch_compile_config: {}
+   bettertransformer: false
+   quantization_scheme: null
+   quantization_config: {}
+   use_ddp: false
+   ddp_config: {}
+   peft_strategy: null
+   peft_config: {}
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
+   duration: 15
+   warmup_runs: 10
+   memory: false
+   energy: false
+   input_shapes:
+     batch_size: 1
+     sequence_length: 200
+     num_choices: 1
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+   can_diffuse: ${can_diffuse:${task}}
+   can_generate: ${can_generate:${task}}
+   forward_kwargs: {}
+   generate_kwargs: {}
+ experiment_name: llama_1gpu_inference
+ model: meta-llama/Llama-2-7b-chat-hf
+ device: cuda
+ task: ${infer_task:${model}}
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.13.1
+   transformers_version: 4.34.0.dev0
+   accelerate_version: 0.23.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' AMD EPYC 7643 48-Core Processor'
+   cpu_count: 96
+   cpu_ram_mb: 1082028
+   gpus:
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/0/.config/hydra.yaml ADDED
@@ -0,0 +1,174 @@
+ hydra:
+   run:
+     dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
+   sweep:
+     dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
+     subdir: ${hydra.job.num}
+   launcher:
+     _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+   sweeper:
+     _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+     max_batch_size: null
+     params:
+       benchmark.input_shapes.batch_size: 1,16
+       backend.torch_dtype: float16,float32
+   help:
+     app_name: ${hydra.job.name}
+     header: '${hydra.help.app_name} is powered by Hydra.
+
+       '
+     footer: 'Powered by Hydra (https://hydra.cc)
+
+       Use --hydra-help to view Hydra specific help
+
+       '
+     template: '${hydra.help.header}
+
+       == Configuration groups ==
+
+       Compose your configuration from those groups (group=option)
+
+
+       $APP_CONFIG_GROUPS
+
+
+       == Config ==
+
+       Override anything in the config (foo.bar=value)
+
+
+       $CONFIG
+
+
+       ${hydra.help.footer}
+
+       '
+   hydra_help:
+     template: 'Hydra (${hydra.runtime.version})
+
+       See https://hydra.cc for more info.
+
+
+       == Flags ==
+
+       $FLAGS_HELP
+
+
+       == Configuration groups ==
+
+       Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+       to command line)
+
+
+       $HYDRA_CONFIG_GROUPS
+
+
+       Use ''--cfg hydra'' to Show the Hydra config.
+
+       '
+     hydra_help: ???
+   hydra_logging:
+     version: 1
+     formatters:
+       colorlog:
+         (): colorlog.ColoredFormatter
+         format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+     handlers:
+       console:
+         class: logging.StreamHandler
+         formatter: colorlog
+         stream: ext://sys.stdout
+     root:
+       level: INFO
+       handlers:
+       - console
+     disable_existing_loggers: false
+   job_logging:
+     version: 1
+     formatters:
+       simple:
+         format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+       colorlog:
+         (): colorlog.ColoredFormatter
+         format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+           - %(message)s'
+         log_colors:
+           DEBUG: purple
+           INFO: green
+           WARNING: yellow
+           ERROR: red
+           CRITICAL: red
+     handlers:
+       console:
+         class: logging.StreamHandler
+         formatter: colorlog
+         stream: ext://sys.stdout
+       file:
+         class: logging.FileHandler
+         formatter: simple
+         filename: ${hydra.job.name}.log
+     root:
+       level: INFO
+       handlers:
+       - console
+       - file
+     disable_existing_loggers: false
+   env: {}
+   mode: MULTIRUN
+   searchpath: []
+   callbacks: {}
+   output_subdir: .hydra
+   overrides:
+     hydra:
+     - hydra.mode=MULTIRUN
+     task:
+     - benchmark.input_shapes.batch_size=1
+     - backend.torch_dtype=float16
+   job:
+     name: experiment
+     chdir: true
+     override_dirname: backend.torch_dtype=float16,benchmark.input_shapes.batch_size=1
+     id: '0'
+     num: 0
+     config_name: llama2_1gpu_inference
+     env_set: {}
+     env_copy: []
+     config:
+       override_dirname:
+         kv_sep: '='
+         item_sep: ','
+         exclude_keys: []
+   runtime:
+     version: 1.3.2
+     version_base: '1.3'
+     cwd: /home/user/transformers-regression
+     config_sources:
+     - path: hydra.conf
+       schema: pkg
+       provider: hydra
+     - path: optimum_benchmark
+       schema: pkg
+       provider: main
+     - path: hydra_plugins.hydra_colorlog.conf
+       schema: pkg
+       provider: hydra-colorlog
+     - path: /home/user/transformers-regression/configs
+       schema: file
+       provider: command-line
+     - path: ''
+       schema: structured
+       provider: schema
+     output_dir: /home/user/transformers-regression/sweeps/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/0
+     choices:
+       benchmark: inference
+       backend: pytorch
+       hydra/env: default
+       hydra/callbacks: null
+       hydra/job_logging: colorlog
+       hydra/hydra_logging: colorlog
+       hydra/hydra_help: default
+       hydra/help: default
+       hydra/sweeper: basic
+       hydra/launcher: basic
+       hydra/output: default
+   verbose: false
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/0/.config/overrides.yaml ADDED
@@ -0,0 +1,2 @@
+ - benchmark.input_shapes.batch_size=1
+ - backend.torch_dtype=float16
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/0/experiment.log ADDED
@@ -0,0 +1,25 @@
+ [2023-09-27 13:01:27,277][inference][INFO] - `new_tokens` was set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200.
+ [2023-09-27 13:01:27,665][experiment][WARNING] - Multiple GPUs detected but CUDA_DEVICE_ORDER is not set. This means that code might allocate resources from the wrong GPUs even if CUDA_VISIBLE_DEVICES is set. Pytorch uses the `FASTEST_FIRST` order by default, which is not guaranteed to be the same as nvidia-smi. `CUDA_DEVICE_ORDER` will be set to `PCI_BUS_ID` to ensure that the GPUs are allocated in the same order as nvidia-smi.
+ [2023-09-27 13:01:30,031][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-09-27 13:01:30,031][backend][INFO] - Configuring pytorch backend
+ [2023-09-27 13:01:30,033][pytorch][INFO] - + Disabling gradients
+ [2023-09-27 13:01:30,033][pytorch][INFO] - + Loading model on device: cuda
+ [2023-09-27 13:01:35,819][pytorch][INFO] - + Turning on model's eval mode
+ [2023-09-27 13:01:35,827][benchmark][INFO] - Configuring inference benchmark
+ [2023-09-27 13:01:35,827][inference][INFO] - Running inference benchmark
+ [2023-09-27 13:01:35,827][input_generator][INFO] - Using llama model type generator
+ [2023-09-27 13:01:35,850][inference][INFO] - + Preparing input for the forward pass
+ [2023-09-27 13:01:35,850][inference][INFO] - + Warming up the forward pass
+ [2023-09-27 13:01:36,798][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-09-27 13:01:36,798][latency_tracker][INFO] - Tracked Pytorch devices: [0]
+ [2023-09-27 13:01:51,880][inference][INFO] - + Forward pass latency: 6.68e-02 (s)
+ [2023-09-27 13:01:51,881][inference][INFO] - + Forward pass throughput: 15.00 (samples/s)
+ [2023-09-27 13:01:51,881][inference][INFO] - + Preparing input for the generation pass
+ [2023-09-27 13:01:51,881][inference][INFO] - + Warming up the generation pass
+ [2023-09-27 13:01:57,372][inference][INFO] - + Tracking generation latency and throughput
+ [2023-09-27 13:01:57,373][latency_tracker][INFO] - Tracked Pytorch devices: [0]
+ [2023-09-27 13:02:17,103][inference][INFO] - + Generation pass latency: 4.93e+00 (s)
+ [2023-09-27 13:02:17,104][inference][INFO] - + Generation pass throughput: 40.60 (tokens/s)
+ [2023-09-27 13:02:17,104][inference][INFO] - Saving inference results
+ [2023-09-27 13:02:17,108][backend][INFO] - Cleaning pytorch backend
+ [2023-09-27 13:02:17,108][backend][INFO] - + Deleting pretrained model
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/0/hydra_config.yaml ADDED
@@ -0,0 +1,79 @@
+ backend:
+   name: pytorch
+   version: 2.1.0+rocm5.6
+   _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
+   seed: 42
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: false
+   continous_isolation_check: false
+   delete_cache: false
+   no_weights: false
+   device_map: null
+   torch_dtype: float16
+   disable_grad: true
+   eval_mode: true
+   amp_autocast: false
+   amp_dtype: null
+   torch_compile: false
+   torch_compile_config: {}
+   bettertransformer: false
+   quantization_scheme: null
+   quantization_config: {}
+   use_ddp: false
+   ddp_config: {}
+   peft_strategy: null
+   peft_config: {}
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
+   duration: 15
+   warmup_runs: 10
+   memory: false
+   energy: false
+   input_shapes:
+     batch_size: 1
+     sequence_length: 200
+     num_choices: 1
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+   can_diffuse: false
+   can_generate: true
+   forward_kwargs: {}
+   generate_kwargs:
+     max_new_tokens: 200
+     min_new_tokens: 200
+     do_sample: false
+     use_cache: true
+     pad_token_id: 0
+     num_beams: 1
+ experiment_name: llama_1gpu_inference
+ model: meta-llama/Llama-2-7b-chat-hf
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.13.1
+   transformers_version: 4.34.0.dev0
+   accelerate_version: 0.23.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' AMD EPYC 7643 48-Core Processor'
+   cpu_count: 96
+   cpu_ram_mb: 1082028
+   gpus:
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/0/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0.0668,15.0,4.93,40.6
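The reported throughputs appear to follow directly from the latencies and the config above (`batch_size: 1`, `new_tokens: 200`): forward throughput ≈ batch_size / latency and generation throughput ≈ new_tokens × batch_size / latency. A quick check with the values copied from the CSV (illustrative only, not part of the benchmark suite):

```python
# Check that the inference_results.csv row matches batch_size=1, new_tokens=200
# from the config above (values copied from the diff).
forward_latency, forward_throughput = 0.0668, 15.0
generate_latency, generate_throughput = 4.93, 40.6
batch_size, new_tokens = 1, 200

assert round(batch_size / forward_latency, 1) == forward_throughput                 # 14.97 -> 15.0
assert round(new_tokens * batch_size / generate_latency, 1) == generate_throughput  # 40.57 -> 40.6
print("throughputs are consistent with the reported latencies")
```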
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/1/.config/config.yaml ADDED
@@ -0,0 +1,73 @@
+ backend:
+   name: pytorch
+   version: ${pytorch_version:}
+   _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
+   seed: 42
+   inter_op_num_threads: null
+   intra_op_num_threads: null
+   initial_isolation_check: false
+   continous_isolation_check: false
+   delete_cache: false
+   no_weights: false
+   device_map: null
+   torch_dtype: float32
+   disable_grad: ${is_inference:${benchmark.name}}
+   eval_mode: ${is_inference:${benchmark.name}}
+   amp_autocast: false
+   amp_dtype: null
+   torch_compile: false
+   torch_compile_config: {}
+   bettertransformer: false
+   quantization_scheme: null
+   quantization_config: {}
+   use_ddp: false
+   ddp_config: {}
+   peft_strategy: null
+   peft_config: {}
+ benchmark:
+   name: inference
+   _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
+   duration: 15
+   warmup_runs: 10
+   memory: false
+   energy: false
+   input_shapes:
+     batch_size: 1
+     sequence_length: 200
+     num_choices: 1
+     feature_size: 80
+     nb_max_frames: 3000
+     audio_sequence_length: 16000
+   new_tokens: 200
+   can_diffuse: ${can_diffuse:${task}}
+   can_generate: ${can_generate:${task}}
+   forward_kwargs: {}
+   generate_kwargs: {}
+ experiment_name: llama_1gpu_inference
+ model: meta-llama/Llama-2-7b-chat-hf
+ device: cuda
+ task: ${infer_task:${model}}
+ hub_kwargs:
+   revision: main
+   cache_dir: null
+   force_download: false
+   local_files_only: false
+ environment:
+   optimum_version: 1.13.1
+   transformers_version: 4.34.0.dev0
+   accelerate_version: 0.23.0
+   diffusers_version: null
+   python_version: 3.10.12
+   system: Linux
+   cpu: ' AMD EPYC 7643 48-Core Processor'
+   cpu_count: 96
+   cpu_ram_mb: 1082028
+   gpus:
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
+   - Instinct MI210
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/1/.config/hydra.yaml ADDED
@@ -0,0 +1,174 @@
+ hydra:
+   run:
+     dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
+   sweep:
+     dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
+     subdir: ${hydra.job.num}
+   launcher:
+     _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+   sweeper:
+     _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+     max_batch_size: null
+     params:
+       benchmark.input_shapes.batch_size: 1,16
+       backend.torch_dtype: float16,float32
+   help:
+     app_name: ${hydra.job.name}
+     header: '${hydra.help.app_name} is powered by Hydra.
+
+       '
+     footer: 'Powered by Hydra (https://hydra.cc)
+
+       Use --hydra-help to view Hydra specific help
+
+       '
+     template: '${hydra.help.header}
+
+       == Configuration groups ==
+
+       Compose your configuration from those groups (group=option)
+
+
+       $APP_CONFIG_GROUPS
+
+
+       == Config ==
+
+       Override anything in the config (foo.bar=value)
+
+
+       $CONFIG
+
+
+       ${hydra.help.footer}
+
+       '
+   hydra_help:
+     template: 'Hydra (${hydra.runtime.version})
+
+       See https://hydra.cc for more info.
+
+
+       == Flags ==
+
+       $FLAGS_HELP
+
+
+       == Configuration groups ==
+
+       Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+       to command line)
+
+
+       $HYDRA_CONFIG_GROUPS
+
+
+       Use ''--cfg hydra'' to Show the Hydra config.
+
+       '
+     hydra_help: ???
+   hydra_logging:
+     version: 1
+     formatters:
+       colorlog:
+         (): colorlog.ColoredFormatter
+         format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+     handlers:
+       console:
+         class: logging.StreamHandler
+         formatter: colorlog
+         stream: ext://sys.stdout
+     root:
+       level: INFO
+       handlers:
+       - console
+     disable_existing_loggers: false
+   job_logging:
+     version: 1
+     formatters:
+       simple:
+         format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+       colorlog:
+         (): colorlog.ColoredFormatter
+         format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+           - %(message)s'
+         log_colors:
+           DEBUG: purple
+           INFO: green
+           WARNING: yellow
+           ERROR: red
+           CRITICAL: red
+     handlers:
+       console:
+         class: logging.StreamHandler
+         formatter: colorlog
+         stream: ext://sys.stdout
+       file:
+         class: logging.FileHandler
+         formatter: simple
+         filename: ${hydra.job.name}.log
+     root:
+       level: INFO
+       handlers:
+       - console
+       - file
+     disable_existing_loggers: false
+   env: {}
+   mode: MULTIRUN
+   searchpath: []
+   callbacks: {}
+   output_subdir: .hydra
+   overrides:
+     hydra:
+     - hydra.mode=MULTIRUN
+     task:
+     - benchmark.input_shapes.batch_size=1
+     - backend.torch_dtype=float32
+   job:
+     name: experiment
+     chdir: true
+     override_dirname: backend.torch_dtype=float32,benchmark.input_shapes.batch_size=1
+     id: '1'
+     num: 1
+     config_name: llama2_1gpu_inference
+     env_set: {}
+     env_copy: []
+     config:
+       override_dirname:
+         kv_sep: '='
+         item_sep: ','
+         exclude_keys: []
+   runtime:
+     version: 1.3.2
+     version_base: '1.3'
+     cwd: /home/user/transformers-regression
+     config_sources:
+     - path: hydra.conf
+       schema: pkg
+       provider: hydra
+     - path: optimum_benchmark
+       schema: pkg
+       provider: main
+     - path: hydra_plugins.hydra_colorlog.conf
+       schema: pkg
+       provider: hydra-colorlog
+     - path: /home/user/transformers-regression/configs
+       schema: file
+       provider: command-line
+     - path: ''
+       schema: structured
+       provider: schema
+     output_dir: /home/user/transformers-regression/sweeps/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/1
+     choices:
+       benchmark: inference
+       backend: pytorch
+       hydra/env: default
+       hydra/callbacks: null
+       hydra/job_logging: colorlog
+       hydra/hydra_logging: colorlog
+       hydra/hydra_help: default
+       hydra/help: default
+       hydra/sweeper: basic
+       hydra/launcher: basic
+       hydra/output: default
+   verbose: false
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/1/.config/overrides.yaml ADDED
@@ -0,0 +1,2 @@
+ - benchmark.input_shapes.batch_size=1
+ - backend.torch_dtype=float32
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/1/experiment.log ADDED
@@ -0,0 +1,24 @@
+ [2023-09-27 13:02:18,364][inference][INFO] - `new_tokens` was set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200.
+ [2023-09-27 13:02:19,634][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-09-27 13:02:19,634][backend][INFO] - Configuring pytorch backend
+ [2023-09-27 13:02:19,634][pytorch][INFO] - + Disabling gradients
+ [2023-09-27 13:02:19,634][pytorch][INFO] - + Loading model on device: cuda
+ [2023-09-27 13:02:25,162][pytorch][INFO] - + Turning on model's eval mode
+ [2023-09-27 13:02:25,163][benchmark][INFO] - Configuring inference benchmark
+ [2023-09-27 13:02:25,163][inference][INFO] - Running inference benchmark
+ [2023-09-27 13:02:25,164][input_generator][INFO] - Using llama model type generator
+ [2023-09-27 13:02:25,164][inference][INFO] - + Preparing input for the forward pass
+ [2023-09-27 13:02:25,164][inference][INFO] - + Warming up the forward pass
+ [2023-09-27 13:02:26,325][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-09-27 13:02:26,325][latency_tracker][INFO] - Tracked Pytorch devices: [0]
+ [2023-09-27 13:02:41,458][inference][INFO] - + Forward pass latency: 1.14e-01 (s)
+ [2023-09-27 13:02:41,459][inference][INFO] - + Forward pass throughput: 8.77 (samples/s)
+ [2023-09-27 13:02:41,459][inference][INFO] - + Preparing input for the generation pass
+ [2023-09-27 13:02:41,459][inference][INFO] - + Warming up the generation pass
+ [2023-09-27 13:02:50,789][inference][INFO] - + Tracking generation latency and throughput
+ [2023-09-27 13:02:50,790][latency_tracker][INFO] - Tracked Pytorch devices: [0]
+ [2023-09-27 13:03:09,447][inference][INFO] - + Generation pass latency: 9.33e+00 (s)
+ [2023-09-27 13:03:09,448][inference][INFO] - + Generation pass throughput: 21.40 (tokens/s)
+ [2023-09-27 13:03:09,448][inference][INFO] - Saving inference results
+ [2023-09-27 13:03:09,450][backend][INFO] - Cleaning pytorch backend
+ [2023-09-27 13:03:09,450][backend][INFO] - + Deleting pretrained model
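
Note: the reported throughputs follow directly from the measured latencies and this run's shapes (batch_size=1, new_tokens=200). A quick arithmetic check, assuming throughput is computed as work per mean latency:

    batch_size, new_tokens = 1, 200          # from hydra_config.yaml below
    forward_latency = 0.114                  # s, "1.14e-01" in the log
    generate_latency = 9.33                  # s, "9.33e+00" in the log

    print(round(batch_size / forward_latency, 2))                # 8.77 samples/s
    print(round(batch_size * new_tokens / generate_latency, 1))  # 21.4 tokens/s
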
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/1/hydra_config.yaml ADDED
@@ -0,0 +1,79 @@
+ backend:
+ name: pytorch
+ version: 2.1.0+rocm5.6
+ _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
+ seed: 42
+ inter_op_num_threads: null
+ intra_op_num_threads: null
+ initial_isolation_check: false
+ continous_isolation_check: false
+ delete_cache: false
+ no_weights: false
+ device_map: null
+ torch_dtype: float32
+ disable_grad: true
+ eval_mode: true
+ amp_autocast: false
+ amp_dtype: null
+ torch_compile: false
+ torch_compile_config: {}
+ bettertransformer: false
+ quantization_scheme: null
+ quantization_config: {}
+ use_ddp: false
+ ddp_config: {}
+ peft_strategy: null
+ peft_config: {}
+ benchmark:
+ name: inference
+ _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
+ duration: 15
+ warmup_runs: 10
+ memory: false
+ energy: false
+ input_shapes:
+ batch_size: 1
+ sequence_length: 200
+ num_choices: 1
+ feature_size: 80
+ nb_max_frames: 3000
+ audio_sequence_length: 16000
+ new_tokens: 200
+ can_diffuse: false
+ can_generate: true
+ forward_kwargs: {}
+ generate_kwargs:
+ max_new_tokens: 200
+ min_new_tokens: 200
+ do_sample: false
+ use_cache: true
+ pad_token_id: 0
+ num_beams: 1
+ experiment_name: llama_1gpu_inference
+ model: meta-llama/Llama-2-7b-chat-hf
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+ revision: main
+ cache_dir: null
+ force_download: false
+ local_files_only: false
+ environment:
+ optimum_version: 1.13.1
+ transformers_version: 4.34.0.dev0
+ accelerate_version: 0.23.0
+ diffusers_version: null
+ python_version: 3.10.12
+ system: Linux
+ cpu: ' AMD EPYC 7643 48-Core Processor'
+ cpu_count: 96
+ cpu_ram_mb: 1082028
+ gpus:
+ - Instinct MI210
+ - Instinct MI210
+ - Instinct MI210
+ - Instinct MI210
+ - Instinct MI210
+ - Instinct MI210
+ - Instinct MI210
+ - Instinct MI210
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/1/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0.114,8.77,9.33,21.4
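
Note: each `inference_results.csv` is a single-row table whose headers encode `pass.metric(unit)`. One way to load a result file from this commit (the pandas usage is an illustration, not part of this repo):

    import pandas as pd

    df = pd.read_csv(
        "raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/"
        "llama_1gpu_inference/1/inference_results.csv"
    )
    print(df["generate.throughput(tokens/s)"].iloc[0])  # 21.4
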
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/2/.config/config.yaml ADDED
@@ -0,0 +1,73 @@
+ backend:
+ name: pytorch
+ version: ${pytorch_version:}
+ _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
+ seed: 42
+ inter_op_num_threads: null
+ intra_op_num_threads: null
+ initial_isolation_check: false
+ continous_isolation_check: false
+ delete_cache: false
+ no_weights: false
+ device_map: null
+ torch_dtype: float16
+ disable_grad: ${is_inference:${benchmark.name}}
+ eval_mode: ${is_inference:${benchmark.name}}
+ amp_autocast: false
+ amp_dtype: null
+ torch_compile: false
+ torch_compile_config: {}
+ bettertransformer: false
+ quantization_scheme: null
+ quantization_config: {}
+ use_ddp: false
+ ddp_config: {}
+ peft_strategy: null
+ peft_config: {}
+ benchmark:
+ name: inference
+ _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
+ duration: 15
+ warmup_runs: 10
+ memory: false
+ energy: false
+ input_shapes:
+ batch_size: 16
+ sequence_length: 200
+ num_choices: 1
+ feature_size: 80
+ nb_max_frames: 3000
+ audio_sequence_length: 16000
+ new_tokens: 200
+ can_diffuse: ${can_diffuse:${task}}
+ can_generate: ${can_generate:${task}}
+ forward_kwargs: {}
+ generate_kwargs: {}
+ experiment_name: llama_1gpu_inference
+ model: meta-llama/Llama-2-7b-chat-hf
+ device: cuda
+ task: ${infer_task:${model}}
+ hub_kwargs:
+ revision: main
+ cache_dir: null
+ force_download: false
+ local_files_only: false
+ environment:
+ optimum_version: 1.13.1
+ transformers_version: 4.34.0.dev0
+ accelerate_version: 0.23.0
+ diffusers_version: null
+ python_version: 3.10.12
+ system: Linux
+ cpu: ' AMD EPYC 7643 48-Core Processor'
+ cpu_count: 96
+ cpu_ram_mb: 1082028
+ gpus:
+ - Instinct MI210
+ - Instinct MI210
+ - Instinct MI210
+ - Instinct MI210
+ - Instinct MI210
+ - Instinct MI210
+ - Instinct MI210
+ - Instinct MI210
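
Note: unlike the resolved `hydra_config.yaml` snapshots, this pre-composition config still contains custom interpolations (`${pytorch_version:}`, `${is_inference:...}`, `${infer_task:...}`). These are OmegaConf resolvers registered by optimum_benchmark; a rough, self-contained sketch of the mechanism (the resolver names come from the config above, but the bodies here are assumptions, not the package's actual code):

    from omegaconf import OmegaConf
    import torch

    # Register resolvers under the names used in the config above.
    OmegaConf.register_new_resolver("pytorch_version", lambda: torch.__version__)
    OmegaConf.register_new_resolver("is_inference", lambda name: name == "inference")

    cfg = OmegaConf.create({
        "benchmark": {"name": "inference"},
        "backend": {
            "version": "${pytorch_version:}",
            "eval_mode": "${is_inference:${benchmark.name}}",
        },
    })
    print(cfg.backend.version, cfg.backend.eval_mode)  # e.g. "2.1.0+rocm5.6" True
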
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/2/.config/hydra.yaml ADDED
@@ -0,0 +1,174 @@
+ hydra:
+ run:
+ dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
+ sweep:
+ dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
+ subdir: ${hydra.job.num}
+ launcher:
+ _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+ sweeper:
+ _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+ max_batch_size: null
+ params:
+ benchmark.input_shapes.batch_size: 1,16
+ backend.torch_dtype: float16,float32
+ help:
+ app_name: ${hydra.job.name}
+ header: '${hydra.help.app_name} is powered by Hydra.
+
+ '
+ footer: 'Powered by Hydra (https://hydra.cc)
+
+ Use --hydra-help to view Hydra specific help
+
+ '
+ template: '${hydra.help.header}
+
+ == Configuration groups ==
+
+ Compose your configuration from those groups (group=option)
+
+
+ $APP_CONFIG_GROUPS
+
+
+ == Config ==
+
+ Override anything in the config (foo.bar=value)
+
+
+ $CONFIG
+
+
+ ${hydra.help.footer}
+
+ '
+ hydra_help:
+ template: 'Hydra (${hydra.runtime.version})
+
+ See https://hydra.cc for more info.
+
+
+ == Flags ==
+
+ $FLAGS_HELP
+
+
+ == Configuration groups ==
+
+ Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+ to command line)
+
+
+ $HYDRA_CONFIG_GROUPS
+
+
+ Use ''--cfg hydra'' to Show the Hydra config.
+
+ '
+ hydra_help: ???
+ hydra_logging:
+ version: 1
+ formatters:
+ colorlog:
+ (): colorlog.ColoredFormatter
+ format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+ handlers:
+ console:
+ class: logging.StreamHandler
+ formatter: colorlog
+ stream: ext://sys.stdout
+ root:
+ level: INFO
+ handlers:
+ - console
+ disable_existing_loggers: false
+ job_logging:
+ version: 1
+ formatters:
+ simple:
+ format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+ colorlog:
+ (): colorlog.ColoredFormatter
+ format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+ - %(message)s'
+ log_colors:
+ DEBUG: purple
+ INFO: green
+ WARNING: yellow
+ ERROR: red
+ CRITICAL: red
+ handlers:
+ console:
+ class: logging.StreamHandler
+ formatter: colorlog
+ stream: ext://sys.stdout
+ file:
+ class: logging.FileHandler
+ formatter: simple
+ filename: ${hydra.job.name}.log
+ root:
+ level: INFO
+ handlers:
+ - console
+ - file
+ disable_existing_loggers: false
+ env: {}
+ mode: MULTIRUN
+ searchpath: []
+ callbacks: {}
+ output_subdir: .hydra
+ overrides:
+ hydra:
+ - hydra.mode=MULTIRUN
+ task:
+ - benchmark.input_shapes.batch_size=16
+ - backend.torch_dtype=float16
+ job:
+ name: experiment
+ chdir: true
+ override_dirname: backend.torch_dtype=float16,benchmark.input_shapes.batch_size=16
+ id: '2'
+ num: 2
+ config_name: llama2_1gpu_inference
+ env_set: {}
+ env_copy: []
+ config:
+ override_dirname:
+ kv_sep: '='
+ item_sep: ','
+ exclude_keys: []
+ runtime:
+ version: 1.3.2
+ version_base: '1.3'
+ cwd: /home/user/transformers-regression
+ config_sources:
+ - path: hydra.conf
+ schema: pkg
+ provider: hydra
+ - path: optimum_benchmark
+ schema: pkg
+ provider: main
+ - path: hydra_plugins.hydra_colorlog.conf
+ schema: pkg
+ provider: hydra-colorlog
+ - path: /home/user/transformers-regression/configs
+ schema: file
+ provider: command-line
+ - path: ''
+ schema: structured
+ provider: schema
+ output_dir: /home/user/transformers-regression/sweeps/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/2
+ choices:
+ benchmark: inference
+ backend: pytorch
+ hydra/env: default
+ hydra/callbacks: null
+ hydra/job_logging: colorlog
+ hydra/hydra_logging: colorlog
+ hydra/hydra_help: default
+ hydra/help: default
+ hydra/sweeper: basic
+ hydra/launcher: basic
+ hydra/output: default
+ verbose: false
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/2/.config/overrides.yaml ADDED
@@ -0,0 +1,2 @@
+ - benchmark.input_shapes.batch_size=16
+ - backend.torch_dtype=float16
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/2/experiment.log ADDED
@@ -0,0 +1,24 @@
+ [2023-09-27 13:03:10,806][inference][INFO] - `new_tokens` was set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200.
+ [2023-09-27 13:03:11,953][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-09-27 13:03:11,954][backend][INFO] - Configuring pytorch backend
+ [2023-09-27 13:03:11,954][pytorch][INFO] - + Disabling gradients
+ [2023-09-27 13:03:11,954][pytorch][INFO] - + Loading model on device: cuda
+ [2023-09-27 13:03:17,249][pytorch][INFO] - + Turning on model's eval mode
+ [2023-09-27 13:03:17,251][benchmark][INFO] - Configuring inference benchmark
+ [2023-09-27 13:03:17,251][inference][INFO] - Running inference benchmark
+ [2023-09-27 13:03:17,251][input_generator][INFO] - Using llama model type generator
+ [2023-09-27 13:03:17,251][inference][INFO] - + Preparing input for the forward pass
+ [2023-09-27 13:03:17,252][inference][INFO] - + Warming up the forward pass
+ [2023-09-27 13:03:21,847][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-09-27 13:03:21,847][latency_tracker][INFO] - Tracked Pytorch devices: [0]
+ [2023-09-27 13:03:37,595][inference][INFO] - + Forward pass latency: 5.09e-01 (s)
+ [2023-09-27 13:03:37,595][inference][INFO] - + Forward pass throughput: 31.40 (samples/s)
+ [2023-09-27 13:03:37,596][inference][INFO] - + Preparing input for the generation pass
+ [2023-09-27 13:03:37,596][inference][INFO] - + Warming up the generation pass
+ [2023-09-27 13:03:46,980][inference][INFO] - + Tracking generation latency and throughput
+ [2023-09-27 13:03:46,981][latency_tracker][INFO] - Tracked Pytorch devices: [0]
+ [2023-09-27 13:04:05,090][inference][INFO] - + Generation pass latency: 9.05e+00 (s)
+ [2023-09-27 13:04:05,090][inference][INFO] - + Generation pass throughput: 354.00 (tokens/s)
+ [2023-09-27 13:04:05,090][inference][INFO] - Saving inference results
+ [2023-09-27 13:04:05,092][backend][INFO] - Cleaning pytorch backend
+ [2023-09-27 13:04:05,092][backend][INFO] - + Deleting pretrained model
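
Note: the log shape (warmup passes, then a tracked window, then aggregate latency/throughput) matches `warmup_runs: 10` and `duration: 15` in the config. A minimal sketch of that measurement pattern, assuming simple wall-clock timing (the real tracker presumably also synchronizes the tracked PyTorch devices around each timed call):

    import time

    def mean_latency(fn, warmup_runs=10, duration=15.0):
        for _ in range(warmup_runs):                   # untimed warmup passes
            fn()
        latencies, start = [], time.perf_counter()
        while time.perf_counter() - start < duration:  # timed window of ~`duration` s
            t0 = time.perf_counter()
            fn()
            latencies.append(time.perf_counter() - t0)
        return sum(latencies) / len(latencies)
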
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/2/hydra_config.yaml ADDED
@@ -0,0 +1,79 @@
+ backend:
+ name: pytorch
+ version: 2.1.0+rocm5.6
+ _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
+ seed: 42
+ inter_op_num_threads: null
+ intra_op_num_threads: null
+ initial_isolation_check: false
+ continous_isolation_check: false
+ delete_cache: false
+ no_weights: false
+ device_map: null
+ torch_dtype: float16
+ disable_grad: true
+ eval_mode: true
+ amp_autocast: false
+ amp_dtype: null
+ torch_compile: false
+ torch_compile_config: {}
+ bettertransformer: false
+ quantization_scheme: null
+ quantization_config: {}
+ use_ddp: false
+ ddp_config: {}
+ peft_strategy: null
+ peft_config: {}
+ benchmark:
+ name: inference
+ _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
+ duration: 15
+ warmup_runs: 10
+ memory: false
+ energy: false
+ input_shapes:
+ batch_size: 16
+ sequence_length: 200
+ num_choices: 1
+ feature_size: 80
+ nb_max_frames: 3000
+ audio_sequence_length: 16000
+ new_tokens: 200
+ can_diffuse: false
+ can_generate: true
+ forward_kwargs: {}
+ generate_kwargs:
+ max_new_tokens: 200
+ min_new_tokens: 200
+ do_sample: false
+ use_cache: true
+ pad_token_id: 0
+ num_beams: 1
+ experiment_name: llama_1gpu_inference
+ model: meta-llama/Llama-2-7b-chat-hf
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+ revision: main
+ cache_dir: null
+ force_download: false
+ local_files_only: false
+ environment:
+ optimum_version: 1.13.1
+ transformers_version: 4.34.0.dev0
+ accelerate_version: 0.23.0
+ diffusers_version: null
+ python_version: 3.10.12
+ system: Linux
+ cpu: ' AMD EPYC 7643 48-Core Processor'
+ cpu_count: 96
+ cpu_ram_mb: 1082028
+ gpus:
+ - Instinct MI210
+ - Instinct MI210
+ - Instinct MI210
+ - Instinct MI210
+ - Instinct MI210
+ - Instinct MI210
+ - Instinct MI210
+ - Instinct MI210
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/2/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 0.509,31.4,9.05,354.0
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/3/.config/config.yaml ADDED
@@ -0,0 +1,73 @@
+ backend:
+ name: pytorch
+ version: ${pytorch_version:}
+ _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
+ seed: 42
+ inter_op_num_threads: null
+ intra_op_num_threads: null
+ initial_isolation_check: false
+ continous_isolation_check: false
+ delete_cache: false
+ no_weights: false
+ device_map: null
+ torch_dtype: float32
+ disable_grad: ${is_inference:${benchmark.name}}
+ eval_mode: ${is_inference:${benchmark.name}}
+ amp_autocast: false
+ amp_dtype: null
+ torch_compile: false
+ torch_compile_config: {}
+ bettertransformer: false
+ quantization_scheme: null
+ quantization_config: {}
+ use_ddp: false
+ ddp_config: {}
+ peft_strategy: null
+ peft_config: {}
+ benchmark:
+ name: inference
+ _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
+ duration: 15
+ warmup_runs: 10
+ memory: false
+ energy: false
+ input_shapes:
+ batch_size: 16
+ sequence_length: 200
+ num_choices: 1
+ feature_size: 80
+ nb_max_frames: 3000
+ audio_sequence_length: 16000
+ new_tokens: 200
+ can_diffuse: ${can_diffuse:${task}}
+ can_generate: ${can_generate:${task}}
+ forward_kwargs: {}
+ generate_kwargs: {}
+ experiment_name: llama_1gpu_inference
+ model: meta-llama/Llama-2-7b-chat-hf
+ device: cuda
+ task: ${infer_task:${model}}
+ hub_kwargs:
+ revision: main
+ cache_dir: null
+ force_download: false
+ local_files_only: false
+ environment:
+ optimum_version: 1.13.1
+ transformers_version: 4.34.0.dev0
+ accelerate_version: 0.23.0
+ diffusers_version: null
+ python_version: 3.10.12
+ system: Linux
+ cpu: ' AMD EPYC 7643 48-Core Processor'
+ cpu_count: 96
+ cpu_ram_mb: 1082028
+ gpus:
+ - Instinct MI210
+ - Instinct MI210
+ - Instinct MI210
+ - Instinct MI210
+ - Instinct MI210
+ - Instinct MI210
+ - Instinct MI210
+ - Instinct MI210
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/3/.config/hydra.yaml ADDED
@@ -0,0 +1,174 @@
+ hydra:
+ run:
+ dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
+ sweep:
+ dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
+ subdir: ${hydra.job.num}
+ launcher:
+ _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+ sweeper:
+ _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+ max_batch_size: null
+ params:
+ benchmark.input_shapes.batch_size: 1,16
+ backend.torch_dtype: float16,float32
+ help:
+ app_name: ${hydra.job.name}
+ header: '${hydra.help.app_name} is powered by Hydra.
+
+ '
+ footer: 'Powered by Hydra (https://hydra.cc)
+
+ Use --hydra-help to view Hydra specific help
+
+ '
+ template: '${hydra.help.header}
+
+ == Configuration groups ==
+
+ Compose your configuration from those groups (group=option)
+
+
+ $APP_CONFIG_GROUPS
+
+
+ == Config ==
+
+ Override anything in the config (foo.bar=value)
+
+
+ $CONFIG
+
+
+ ${hydra.help.footer}
+
+ '
+ hydra_help:
+ template: 'Hydra (${hydra.runtime.version})
+
+ See https://hydra.cc for more info.
+
+
+ == Flags ==
+
+ $FLAGS_HELP
+
+
+ == Configuration groups ==
+
+ Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+ to command line)
+
+
+ $HYDRA_CONFIG_GROUPS
+
+
+ Use ''--cfg hydra'' to Show the Hydra config.
+
+ '
+ hydra_help: ???
+ hydra_logging:
+ version: 1
+ formatters:
+ colorlog:
+ (): colorlog.ColoredFormatter
+ format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+ handlers:
+ console:
+ class: logging.StreamHandler
+ formatter: colorlog
+ stream: ext://sys.stdout
+ root:
+ level: INFO
+ handlers:
+ - console
+ disable_existing_loggers: false
+ job_logging:
+ version: 1
+ formatters:
+ simple:
+ format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+ colorlog:
+ (): colorlog.ColoredFormatter
+ format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+ - %(message)s'
+ log_colors:
+ DEBUG: purple
+ INFO: green
+ WARNING: yellow
+ ERROR: red
+ CRITICAL: red
+ handlers:
+ console:
+ class: logging.StreamHandler
+ formatter: colorlog
+ stream: ext://sys.stdout
+ file:
+ class: logging.FileHandler
+ formatter: simple
+ filename: ${hydra.job.name}.log
+ root:
+ level: INFO
+ handlers:
+ - console
+ - file
+ disable_existing_loggers: false
+ env: {}
+ mode: MULTIRUN
+ searchpath: []
+ callbacks: {}
+ output_subdir: .hydra
+ overrides:
+ hydra:
+ - hydra.mode=MULTIRUN
+ task:
+ - benchmark.input_shapes.batch_size=16
+ - backend.torch_dtype=float32
+ job:
+ name: experiment
+ chdir: true
+ override_dirname: backend.torch_dtype=float32,benchmark.input_shapes.batch_size=16
+ id: '3'
+ num: 3
+ config_name: llama2_1gpu_inference
+ env_set: {}
+ env_copy: []
+ config:
+ override_dirname:
+ kv_sep: '='
+ item_sep: ','
+ exclude_keys: []
+ runtime:
+ version: 1.3.2
+ version_base: '1.3'
+ cwd: /home/user/transformers-regression
+ config_sources:
+ - path: hydra.conf
+ schema: pkg
+ provider: hydra
+ - path: optimum_benchmark
+ schema: pkg
+ provider: main
+ - path: hydra_plugins.hydra_colorlog.conf
+ schema: pkg
+ provider: hydra-colorlog
+ - path: /home/user/transformers-regression/configs
+ schema: file
+ provider: command-line
+ - path: ''
+ schema: structured
+ provider: schema
+ output_dir: /home/user/transformers-regression/sweeps/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/3
+ choices:
+ benchmark: inference
+ backend: pytorch
+ hydra/env: default
+ hydra/callbacks: null
+ hydra/job_logging: colorlog
+ hydra/hydra_logging: colorlog
+ hydra/hydra_help: default
+ hydra/help: default
+ hydra/sweeper: basic
+ hydra/launcher: basic
+ hydra/output: default
+ verbose: false
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/3/.config/overrides.yaml ADDED
@@ -0,0 +1,2 @@
+ - benchmark.input_shapes.batch_size=16
+ - backend.torch_dtype=float32
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/3/experiment.log ADDED
@@ -0,0 +1,24 @@
+ [2023-09-27 13:04:06,238][inference][INFO] - `new_tokens` was set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200.
+ [2023-09-27 13:04:07,496][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
+ [2023-09-27 13:04:07,496][backend][INFO] - Configuring pytorch backend
+ [2023-09-27 13:04:07,497][pytorch][INFO] - + Disabling gradients
+ [2023-09-27 13:04:07,497][pytorch][INFO] - + Loading model on device: cuda
+ [2023-09-27 13:04:12,687][pytorch][INFO] - + Turning on model's eval mode
+ [2023-09-27 13:04:12,689][benchmark][INFO] - Configuring inference benchmark
+ [2023-09-27 13:04:12,689][inference][INFO] - Running inference benchmark
+ [2023-09-27 13:04:12,689][input_generator][INFO] - Using llama model type generator
+ [2023-09-27 13:04:12,689][inference][INFO] - + Preparing input for the forward pass
+ [2023-09-27 13:04:12,690][inference][INFO] - + Warming up the forward pass
+ [2023-09-27 13:04:25,380][inference][INFO] - + Tracking forward pass latency and throughput
+ [2023-09-27 13:04:25,380][latency_tracker][INFO] - Tracked Pytorch devices: [0]
+ [2023-09-27 13:04:42,303][inference][INFO] - + Forward pass latency: 1.41e+00 (s)
+ [2023-09-27 13:04:42,304][inference][INFO] - + Forward pass throughput: 11.30 (samples/s)
+ [2023-09-27 13:04:42,305][inference][INFO] - + Preparing input for the generation pass
+ [2023-09-27 13:04:42,305][inference][INFO] - + Warming up the generation pass
+ [2023-09-27 13:04:56,929][inference][INFO] - + Tracking generation latency and throughput
+ [2023-09-27 13:04:56,929][latency_tracker][INFO] - Tracked Pytorch devices: [0]
+ [2023-09-27 13:05:25,309][inference][INFO] - + Generation pass latency: 1.42e+01 (s)
+ [2023-09-27 13:05:25,310][inference][INFO] - + Generation pass throughput: 225.00 (tokens/s)
+ [2023-09-27 13:05:25,310][inference][INFO] - Saving inference results
+ [2023-09-27 13:05:25,312][backend][INFO] - Cleaning pytorch backend
+ [2023-09-27 13:05:25,312][backend][INFO] - + Deleting pretrained model
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/3/hydra_config.yaml ADDED
@@ -0,0 +1,79 @@
+ backend:
+ name: pytorch
+ version: 2.1.0+rocm5.6
+ _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
+ seed: 42
+ inter_op_num_threads: null
+ intra_op_num_threads: null
+ initial_isolation_check: false
+ continous_isolation_check: false
+ delete_cache: false
+ no_weights: false
+ device_map: null
+ torch_dtype: float32
+ disable_grad: true
+ eval_mode: true
+ amp_autocast: false
+ amp_dtype: null
+ torch_compile: false
+ torch_compile_config: {}
+ bettertransformer: false
+ quantization_scheme: null
+ quantization_config: {}
+ use_ddp: false
+ ddp_config: {}
+ peft_strategy: null
+ peft_config: {}
+ benchmark:
+ name: inference
+ _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
+ duration: 15
+ warmup_runs: 10
+ memory: false
+ energy: false
+ input_shapes:
+ batch_size: 16
+ sequence_length: 200
+ num_choices: 1
+ feature_size: 80
+ nb_max_frames: 3000
+ audio_sequence_length: 16000
+ new_tokens: 200
+ can_diffuse: false
+ can_generate: true
+ forward_kwargs: {}
+ generate_kwargs:
+ max_new_tokens: 200
+ min_new_tokens: 200
+ do_sample: false
+ use_cache: true
+ pad_token_id: 0
+ num_beams: 1
+ experiment_name: llama_1gpu_inference
+ model: meta-llama/Llama-2-7b-chat-hf
+ device: cuda
+ task: text-generation
+ hub_kwargs:
+ revision: main
+ cache_dir: null
+ force_download: false
+ local_files_only: false
+ environment:
+ optimum_version: 1.13.1
+ transformers_version: 4.34.0.dev0
+ accelerate_version: 0.23.0
+ diffusers_version: null
+ python_version: 3.10.12
+ system: Linux
+ cpu: ' AMD EPYC 7643 48-Core Processor'
+ cpu_count: 96
+ cpu_ram_mb: 1082028
+ gpus:
+ - Instinct MI210
+ - Instinct MI210
+ - Instinct MI210
+ - Instinct MI210
+ - Instinct MI210
+ - Instinct MI210
+ - Instinct MI210
+ - Instinct MI210
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/3/inference_results.csv ADDED
@@ -0,0 +1,2 @@
+ forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
+ 1.41,11.3,14.2,225.0
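
Note: with all four sweep jobs in place, the per-job CSVs can be joined with their resolved configs to compare the dtype/batch-size combinations. An illustrative helper (not part of this repo; it assumes the committed `hydra_config.yaml` files parse as ordinary YAML):

    import glob
    import pandas as pd
    import yaml

    rows = []
    for csv_path in sorted(glob.glob("raw_results/*/llama_1gpu_inference/*/inference_results.csv")):
        run_dir = csv_path.rsplit("/", 1)[0]
        with open(f"{run_dir}/hydra_config.yaml") as f:
            cfg = yaml.safe_load(f)
        row = pd.read_csv(csv_path).iloc[0].to_dict()       # single-row result file
        row["batch_size"] = cfg["benchmark"]["input_shapes"]["batch_size"]
        row["torch_dtype"] = cfg["backend"]["torch_dtype"]
        rows.append(row)

    print(pd.DataFrame(rows).sort_values(["torch_dtype", "batch_size"]))
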
raw_results/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference/multirun.yaml ADDED
@@ -0,0 +1,245 @@
+ hydra:
+ run:
+ dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
+ sweep:
+ dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
+ subdir: ${hydra.job.num}
+ launcher:
+ _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+ sweeper:
+ _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+ max_batch_size: null
+ params:
+ benchmark.input_shapes.batch_size: 1,16
+ backend.torch_dtype: float16,float32
+ help:
+ app_name: ${hydra.job.name}
+ header: '${hydra.help.app_name} is powered by Hydra.
+
+ '
+ footer: 'Powered by Hydra (https://hydra.cc)
+
+ Use --hydra-help to view Hydra specific help
+
+ '
+ template: '${hydra.help.header}
+
+ == Configuration groups ==
+
+ Compose your configuration from those groups (group=option)
+
+
+ $APP_CONFIG_GROUPS
+
+
+ == Config ==
+
+ Override anything in the config (foo.bar=value)
+
+
+ $CONFIG
+
+
+ ${hydra.help.footer}
+
+ '
+ hydra_help:
+ template: 'Hydra (${hydra.runtime.version})
+
+ See https://hydra.cc for more info.
+
+
+ == Flags ==
+
+ $FLAGS_HELP
+
+
+ == Configuration groups ==
+
+ Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+ to command line)
+
+
+ $HYDRA_CONFIG_GROUPS
+
+
+ Use ''--cfg hydra'' to Show the Hydra config.
+
+ '
+ hydra_help: ???
+ hydra_logging:
+ version: 1
+ formatters:
+ colorlog:
+ (): colorlog.ColoredFormatter
+ format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
+ handlers:
+ console:
+ class: logging.StreamHandler
+ formatter: colorlog
+ stream: ext://sys.stdout
+ root:
+ level: INFO
+ handlers:
+ - console
+ disable_existing_loggers: false
+ job_logging:
+ version: 1
+ formatters:
+ simple:
+ format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+ colorlog:
+ (): colorlog.ColoredFormatter
+ format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
+ - %(message)s'
+ log_colors:
+ DEBUG: purple
+ INFO: green
+ WARNING: yellow
+ ERROR: red
+ CRITICAL: red
+ handlers:
+ console:
+ class: logging.StreamHandler
+ formatter: colorlog
+ stream: ext://sys.stdout
+ file:
+ class: logging.FileHandler
+ formatter: simple
+ filename: ${hydra.job.name}.log
+ root:
+ level: INFO
+ handlers:
+ - console
+ - file
+ disable_existing_loggers: false
+ env: {}
+ mode: MULTIRUN
+ searchpath: []
+ callbacks: {}
+ output_subdir: .hydra
+ overrides:
+ hydra:
+ - hydra.mode=MULTIRUN
+ task: []
+ job:
+ name: experiment
+ chdir: true
+ override_dirname: ''
+ id: ???
+ num: ???
+ config_name: llama2_1gpu_inference
+ env_set: {}
+ env_copy: []
+ config:
+ override_dirname:
+ kv_sep: '='
+ item_sep: ','
+ exclude_keys: []
+ runtime:
+ version: 1.3.2
+ version_base: '1.3'
+ cwd: /home/user/transformers-regression
+ config_sources:
+ - path: hydra.conf
+ schema: pkg
+ provider: hydra
+ - path: optimum_benchmark
+ schema: pkg
+ provider: main
+ - path: hydra_plugins.hydra_colorlog.conf
+ schema: pkg
+ provider: hydra-colorlog
+ - path: /home/user/transformers-regression/configs
+ schema: file
+ provider: command-line
+ - path: ''
+ schema: structured
+ provider: schema
+ output_dir: ???
+ choices:
+ benchmark: inference
+ backend: pytorch
+ hydra/env: default
+ hydra/callbacks: null
+ hydra/job_logging: colorlog
+ hydra/hydra_logging: colorlog
+ hydra/hydra_help: default
+ hydra/help: default
+ hydra/sweeper: basic
+ hydra/launcher: basic
+ hydra/output: default
+ verbose: false
+ backend:
+ name: pytorch
+ version: ${pytorch_version:}
+ _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
+ seed: 42
+ inter_op_num_threads: null
+ intra_op_num_threads: null
+ initial_isolation_check: false
+ continous_isolation_check: false
+ delete_cache: false
+ no_weights: false
+ device_map: null
+ torch_dtype: null
+ disable_grad: ${is_inference:${benchmark.name}}
+ eval_mode: ${is_inference:${benchmark.name}}
+ amp_autocast: false
+ amp_dtype: null
+ torch_compile: false
+ torch_compile_config: {}
+ bettertransformer: false
+ quantization_scheme: null
+ quantization_config: {}
+ use_ddp: false
+ ddp_config: {}
+ peft_strategy: null
+ peft_config: {}
+ benchmark:
+ name: inference
+ _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
+ duration: 15
+ warmup_runs: 10
+ memory: false
+ energy: false
+ input_shapes:
+ batch_size: 2
+ sequence_length: 200
+ num_choices: 1
+ feature_size: 80
+ nb_max_frames: 3000
+ audio_sequence_length: 16000
+ new_tokens: 200
+ can_diffuse: ${can_diffuse:${task}}
+ can_generate: ${can_generate:${task}}
+ forward_kwargs: {}
+ generate_kwargs: {}
+ experiment_name: llama_1gpu_inference
+ model: meta-llama/Llama-2-7b-chat-hf
+ device: cuda
+ task: ${infer_task:${model}}
+ hub_kwargs:
+ revision: main
+ cache_dir: null
+ force_download: false
+ local_files_only: false
+ environment:
+ optimum_version: 1.13.1
+ transformers_version: 4.34.0.dev0
+ accelerate_version: 0.23.0
+ diffusers_version: null
+ python_version: 3.10.12
+ system: Linux
+ cpu: ' AMD EPYC 7643 48-Core Processor'
+ cpu_count: 96
+ cpu_ram_mb: 1082028
+ gpus:
+ - Instinct MI210
+ - Instinct MI210
+ - Instinct MI210
+ - Instinct MI210
+ - Instinct MI210
+ - Instinct MI210
+ - Instinct MI210
+ - Instinct MI210
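
Note: the `run.dir`/`sweep.dir` patterns above resolve through OmegaConf's built-in `oc.env` resolver, which is how the commit date and SHA end up in every output path in this commit. A self-contained illustration of that resolution:

    import os
    from omegaconf import OmegaConf

    os.environ["COMMIT_DATE_GMT"] = "2023-09-26_17:27:09"
    os.environ["COMMIT_SHA"] = "408b2b3c5057b275855ae4c43c452a7f0b37aa45"

    cfg = OmegaConf.create(
        {"dir": "sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/llama_1gpu_inference"}
    )
    print(cfg.dir)
    # sweeps/2023-09-26_17:27:09_408b2b3c5057b275855ae4c43c452a7f0b37aa45/llama_1gpu_inference
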