diff --git a/dana/configs/db/Inference/infos/benchmarks.series.json b/dana/configs/db/Inference/infos/benchmarks.series.json
deleted file mode 100644
index 2da471e1aeef907be75e5faeec63a242d82f6e4a..0000000000000000000000000000000000000000
--- a/dana/configs/db/Inference/infos/benchmarks.series.json
+++ /dev/null
@@ -1 +0,0 @@
-{"llama_1gpu_0_forward_latency_s_":{"status":{"error":"Unable to find first average","lastBuildId":14055},"description":"\nbenchmark.input_shapes.batch_size: 1\nbackend.torch_dtype: float16\nbenchmark.input_shapes.sequence_length: 200\nbenchmark.new_tokens: 200","state":"similarNeedstriage"},"llama_1gpu_0_forward_throughput_samples_s_":{"status":{"error":"Unable to find first average","lastBuildId":14055},"description":"\nbenchmark.input_shapes.batch_size: 1\nbackend.torch_dtype: float16\nbenchmark.input_shapes.sequence_length: 200\nbenchmark.new_tokens: 200","state":"similarNeedstriage"},"llama_1gpu_0_generate_latency_s_":{"status":{"error":"Unable to find first average","lastBuildId":14055},"description":"\nbenchmark.input_shapes.batch_size: 1\nbackend.torch_dtype: float16\nbenchmark.input_shapes.sequence_length: 200\nbenchmark.new_tokens: 200","state":"similarNeedstriage"},"llama_1gpu_0_generate_throughput_tokens_s_":{"status":{"error":"Unable to find first average","lastBuildId":14055},"description":"\nbenchmark.input_shapes.batch_size: 1\nbackend.torch_dtype: float16\nbenchmark.input_shapes.sequence_length: 200\nbenchmark.new_tokens: 200","state":"similarNeedstriage"},"llama_1gpu_1_forward_latency_s_":{"status":{"error":"Unable to find first average","lastBuildId":14055},"description":"\nbenchmark.input_shapes.batch_size: 1\nbackend.torch_dtype: float32\nbenchmark.input_shapes.sequence_length: 200\nbenchmark.new_tokens: 200","state":"similarNeedstriage"},"llama_1gpu_1_forward_throughput_samples_s_":{"status":{"error":"Unable to find first average","lastBuildId":14055},"description":"\nbenchmark.input_shapes.batch_size: 1\nbackend.torch_dtype: float32\nbenchmark.input_shapes.sequence_length: 200\nbenchmark.new_tokens: 200","state":"similarNeedstriage"},"llama_1gpu_1_generate_latency_s_":{"status":{"error":"Unable to find first average","lastBuildId":14055},"description":"\nbenchmark.input_shapes.batch_size: 1\nbackend.torch_dtype: float32\nbenchmark.input_shapes.sequence_length: 200\nbenchmark.new_tokens: 200","state":"similarNeedstriage"},"llama_1gpu_1_generate_throughput_tokens_s_":{"status":{"error":"Unable to find first average","lastBuildId":14055},"description":"\nbenchmark.input_shapes.batch_size: 1\nbackend.torch_dtype: float32\nbenchmark.input_shapes.sequence_length: 200\nbenchmark.new_tokens: 200","state":"similarNeedstriage"},"llama_1gpu_2_forward_latency_s_":{"status":{"error":"Unable to find first average","lastBuildId":14055},"description":"\nbenchmark.input_shapes.batch_size: 16\nbackend.torch_dtype: float16\nbenchmark.input_shapes.sequence_length: 200\nbenchmark.new_tokens: 200","state":"similarNeedstriage"},"llama_1gpu_2_forward_throughput_samples_s_":{"status":{"error":"Unable to find first average","lastBuildId":14055},"description":"\nbenchmark.input_shapes.batch_size: 16\nbackend.torch_dtype: float16\nbenchmark.input_shapes.sequence_length: 200\nbenchmark.new_tokens: 200","state":"similarNeedstriage"},"llama_1gpu_2_generate_latency_s_":{"status":{"error":"Unable to find first average","lastBuildId":14055},"description":"\nbenchmark.input_shapes.batch_size: 16\nbackend.torch_dtype: float16\nbenchmark.input_shapes.sequence_length: 200\nbenchmark.new_tokens: 200","state":"similarNeedstriage"},"llama_1gpu_2_generate_throughput_tokens_s_":{"status":{"error":"Unable to find first average","lastBuildId":14055},"description":"\nbenchmark.input_shapes.batch_size: 16\nbackend.torch_dtype: float16\nbenchmark.input_shapes.sequence_length: 200\nbenchmark.new_tokens: 200","state":"similarNeedstriage"},"llama_1gpu_3_forward_latency_s_":{"status":{"error":"Unable to find first average","lastBuildId":14055},"description":"\nbenchmark.input_shapes.batch_size: 16\nbackend.torch_dtype: float32\nbenchmark.input_shapes.sequence_length: 200\nbenchmark.new_tokens: 200","state":"similarNeedstriage"},"llama_1gpu_3_forward_throughput_samples_s_":{"status":{"error":"Unable to find first average","lastBuildId":14055},"description":"\nbenchmark.input_shapes.batch_size: 16\nbackend.torch_dtype: float32\nbenchmark.input_shapes.sequence_length: 200\nbenchmark.new_tokens: 200","state":"similarNeedstriage"},"llama_1gpu_3_generate_latency_s_":{"status":{"error":"Unable to find first average","lastBuildId":14055},"description":"\nbenchmark.input_shapes.batch_size: 16\nbackend.torch_dtype: float32\nbenchmark.input_shapes.sequence_length: 200\nbenchmark.new_tokens: 200","state":"similarNeedstriage"},"llama_1gpu_3_generate_throughput_tokens_s_":{"status":{"error":"Unable to find first average","lastBuildId":14055},"description":"\nbenchmark.input_shapes.batch_size: 16\nbackend.torch_dtype: float32\nbenchmark.input_shapes.sequence_length: 200\nbenchmark.new_tokens: 200","state":"similarNeedstriage"}}
\ No newline at end of file
diff --git a/dana/configs/db/Inference/infos/benchmarks.statusSeries.json b/dana/configs/db/Inference/infos/benchmarks.statusSeries.json
deleted file mode 100644
index c57b47ca8bd819f72955375e19f7d983dfa0d66b..0000000000000000000000000000000000000000
--- a/dana/configs/db/Inference/infos/benchmarks.statusSeries.json
+++ /dev/null
@@ -1 +0,0 @@
-{"0":{"numSeries":16,"numSeriesSimilar":0,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":16,"time":1695816293533}}
\ No newline at end of file
diff --git a/dana/configs/db/Inference/infos/builds.json b/dana/configs/db/Inference/infos/builds.json
deleted file mode 100644
index dd487443ef60859508543c17e2796cf9a1cb5cf1..0000000000000000000000000000000000000000
--- a/dana/configs/db/Inference/infos/builds.json
+++ /dev/null
@@ -1 +0,0 @@
-{"14054":{"buildId":14054,"infos":{"hash":"153755ee386ac73e04814a94337abcb1208ff5d1","abbrevHash":"153755ee","authorName":"Younes Belkada","authorEmail":"49240599+younesbelkada@users.noreply.github.com","subject":"[`FA` / `tests`] Add use_cache tests for FA models (#26415)","url":null}},"14055":{"buildId":14055,"infos":{"hash":"946bac798caefada3f5f1c9fecdcfd587ed24ac7","abbrevHash":"946bac79","authorName":"statelesshz","authorEmail":"hzji210@gmail.com","subject":"add bf16 mixed precision support for NPU (#26163)","url":null}}}
\ No newline at end of file
diff --git a/dana/configs/db/Inference/series/llama_1gpu/_0_forward/_latency_s/_.json b/dana/configs/db/Inference/series/llama_1gpu/_0_forward/_latency_s/_.json
deleted file mode 100644
index 74649e28662678333a8bb6180eff7ee9ea9b7257..0000000000000000000000000000000000000000
--- a/dana/configs/db/Inference/series/llama_1gpu/_0_forward/_latency_s/_.json
+++ /dev/null
@@ -1 +0,0 @@
-{"projectId":"Inference","description":"\nbenchmark.input_shapes.batch_size: 1\nbackend.torch_dtype: float16\nbenchmark.input_shapes.sequence_length: 200\nbenchmark.new_tokens: 200","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14054":0.00239,"14055":0.00312},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14055","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14055}}}
\ No newline at end of file
diff --git a/dana/configs/db/Inference/series/llama_1gpu/_0_forward/_throughpu/t_samples_/s_.json b/dana/configs/db/Inference/series/llama_1gpu/_0_forward/_throughpu/t_samples_/s_.json
deleted file mode 100644
index 395ee75821348c6759a5ae92e976a187cd2d8832..0000000000000000000000000000000000000000
--- a/dana/configs/db/Inference/series/llama_1gpu/_0_forward/_throughpu/t_samples_/s_.json
+++ /dev/null
@@ -1 +0,0 @@
-{"projectId":"Inference","description":"\nbenchmark.input_shapes.batch_size: 1\nbackend.torch_dtype: float16\nbenchmark.input_shapes.sequence_length: 200\nbenchmark.new_tokens: 200","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14054":418,"14055":321},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14055","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14055}}}
\ No newline at end of file
diff --git a/dana/configs/db/Inference/series/llama_1gpu/_0_generat/e_latency_/s_.json b/dana/configs/db/Inference/series/llama_1gpu/_0_generat/e_latency_/s_.json
deleted file mode 100644
index 9b0a51b1aad4637c8af183fde274ef09909ae1ec..0000000000000000000000000000000000000000
--- a/dana/configs/db/Inference/series/llama_1gpu/_0_generat/e_latency_/s_.json
+++ /dev/null
@@ -1 +0,0 @@
-{"projectId":"Inference","description":"\nbenchmark.input_shapes.batch_size: 1\nbackend.torch_dtype: float16\nbenchmark.input_shapes.sequence_length: 200\nbenchmark.new_tokens: 200","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14054":0.491,"14055":0.639},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14055","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14055}}}
\ No newline at end of file
diff --git a/dana/configs/db/Inference/series/llama_1gpu/_0_generat/e_throughp/ut_tokens_/s_.json b/dana/configs/db/Inference/series/llama_1gpu/_0_generat/e_throughp/ut_tokens_/s_.json
deleted file mode 100644
index 370950f1aac303762957bcd6cb4631f1785f7ae7..0000000000000000000000000000000000000000
--- a/dana/configs/db/Inference/series/llama_1gpu/_0_generat/e_throughp/ut_tokens_/s_.json
+++ /dev/null
@@ -1 +0,0 @@
-{"projectId":"Inference","description":"\nbenchmark.input_shapes.batch_size: 1\nbackend.torch_dtype: float16\nbenchmark.input_shapes.sequence_length: 200\nbenchmark.new_tokens: 200","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14054":407,"14055":313},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14055","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14055}}}
\ No newline at end of file
diff --git a/dana/configs/db/Inference/series/llama_1gpu/_1_forward/_latency_s/_.json b/dana/configs/db/Inference/series/llama_1gpu/_1_forward/_latency_s/_.json
deleted file mode 100644
index d4b99848786046bab6449755df47ea30765e0f95..0000000000000000000000000000000000000000
--- a/dana/configs/db/Inference/series/llama_1gpu/_1_forward/_latency_s/_.json
+++ /dev/null
@@ -1 +0,0 @@
-{"projectId":"Inference","description":"\nbenchmark.input_shapes.batch_size: 1\nbackend.torch_dtype: float32\nbenchmark.input_shapes.sequence_length: 200\nbenchmark.new_tokens: 200","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14054":0.00328,"14055":0.00332},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14055","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14055}}} \ No newline at end of file diff --git a/dana/configs/db/Inference/series/llama_1gpu/_1_forward/_throughpu/t_samples_/s_.json b/dana/configs/db/Inference/series/llama_1gpu/_1_forward/_throughpu/t_samples_/s_.json deleted file mode 100644 index 0b622933b3a78e156d91b131a8d0c90f54c83296..0000000000000000000000000000000000000000 --- a/dana/configs/db/Inference/series/llama_1gpu/_1_forward/_throughpu/t_samples_/s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Inference","description":"\nbenchmark.input_shapes.batch_size: 1\nbackend.torch_dtype: float32\nbenchmark.input_shapes.sequence_length: 200\nbenchmark.new_tokens: 200","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14054":305,"14055":301},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14055","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14055}}} \ No newline at end of file diff --git a/dana/configs/db/Inference/series/llama_1gpu/_1_generat/e_latency_/s_.json b/dana/configs/db/Inference/series/llama_1gpu/_1_generat/e_latency_/s_.json deleted file mode 100644 index a6d2eedc2015c7b728f6993197825ee4fa1d0816..0000000000000000000000000000000000000000 --- a/dana/configs/db/Inference/series/llama_1gpu/_1_generat/e_latency_/s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Inference","description":"\nbenchmark.input_shapes.batch_size: 1\nbackend.torch_dtype: float32\nbenchmark.input_shapes.sequence_length: 200\nbenchmark.new_tokens: 200","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14054":0.53,"14055":0.537},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14055","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14055}}} \ No newline at end of file diff --git a/dana/configs/db/Inference/series/llama_1gpu/_1_generat/e_throughp/ut_tokens_/s_.json b/dana/configs/db/Inference/series/llama_1gpu/_1_generat/e_throughp/ut_tokens_/s_.json deleted file mode 100644 index 29fabf161e5932e8dd3a683865a6f5c00411af1a..0000000000000000000000000000000000000000 --- a/dana/configs/db/Inference/series/llama_1gpu/_1_generat/e_throughp/ut_tokens_/s_.json +++ /dev/null @@ -1 +0,0 @@ -{"projectId":"Inference","description":"\nbenchmark.input_shapes.batch_size: 1\nbackend.torch_dtype: float32\nbenchmark.input_shapes.sequence_length: 200\nbenchmark.new_tokens: 200","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14054":377,"14055":372},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14055","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14055}}} \ No newline at end of file diff --git a/dana/configs/db/Inference/series/llama_1gpu/_2_forward/_latency_s/_.json b/dana/configs/db/Inference/series/llama_1gpu/_2_forward/_latency_s/_.json deleted file mode 100644 index f6d07fcadd83ff71122fe6c65ed1be7630df4a63..0000000000000000000000000000000000000000 
--- a/dana/configs/db/Inference/series/llama_1gpu/_2_forward/_latency_s/_.json
+++ /dev/null
@@ -1 +0,0 @@
-{"projectId":"Inference","description":"\nbenchmark.input_shapes.batch_size: 16\nbackend.torch_dtype: float16\nbenchmark.input_shapes.sequence_length: 200\nbenchmark.new_tokens: 200","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14054":0.0041,"14055":0.00487},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14055","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14055}}}
\ No newline at end of file
diff --git a/dana/configs/db/Inference/series/llama_1gpu/_2_forward/_throughpu/t_samples_/s_.json b/dana/configs/db/Inference/series/llama_1gpu/_2_forward/_throughpu/t_samples_/s_.json
deleted file mode 100644
index 8f845872ffae1ca2fe901d9d215b8464b8525768..0000000000000000000000000000000000000000
--- a/dana/configs/db/Inference/series/llama_1gpu/_2_forward/_throughpu/t_samples_/s_.json
+++ /dev/null
@@ -1 +0,0 @@
-{"projectId":"Inference","description":"\nbenchmark.input_shapes.batch_size: 16\nbackend.torch_dtype: float16\nbenchmark.input_shapes.sequence_length: 200\nbenchmark.new_tokens: 200","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14054":3900,"14055":3290},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14055","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14055}}}
\ No newline at end of file
diff --git a/dana/configs/db/Inference/series/llama_1gpu/_2_generat/e_latency_/s_.json b/dana/configs/db/Inference/series/llama_1gpu/_2_generat/e_latency_/s_.json
deleted file mode 100644
index a6323be2e9692fbc44dc56ea819dacc97103f56b..0000000000000000000000000000000000000000
--- a/dana/configs/db/Inference/series/llama_1gpu/_2_generat/e_latency_/s_.json
+++ /dev/null
@@ -1 +0,0 @@
-{"projectId":"Inference","description":"\nbenchmark.input_shapes.batch_size: 16\nbackend.torch_dtype: float16\nbenchmark.input_shapes.sequence_length: 200\nbenchmark.new_tokens: 200","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14054":0.655,"14055":0.765},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14055","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14055}}}
\ No newline at end of file
diff --git a/dana/configs/db/Inference/series/llama_1gpu/_2_generat/e_throughp/ut_tokens_/s_.json b/dana/configs/db/Inference/series/llama_1gpu/_2_generat/e_throughp/ut_tokens_/s_.json
deleted file mode 100644
index a0f524de14cf7fa2d5f0dd31d4c679e98aa8f0a5..0000000000000000000000000000000000000000
--- a/dana/configs/db/Inference/series/llama_1gpu/_2_generat/e_throughp/ut_tokens_/s_.json
+++ /dev/null
@@ -1 +0,0 @@
-{"projectId":"Inference","description":"\nbenchmark.input_shapes.batch_size: 16\nbackend.torch_dtype: float16\nbenchmark.input_shapes.sequence_length: 200\nbenchmark.new_tokens: 200","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14054":4890,"14055":4180},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14055","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14055}}}
\ No newline at end of file
diff --git a/dana/configs/db/Inference/series/llama_1gpu/_3_forward/_latency_s/_.json b/dana/configs/db/Inference/series/llama_1gpu/_3_forward/_latency_s/_.json
deleted file mode 100644
index 19ad1b58d458be058ba8be3f9d5535d16f7bb888..0000000000000000000000000000000000000000
--- a/dana/configs/db/Inference/series/llama_1gpu/_3_forward/_latency_s/_.json
+++ /dev/null
@@ -1 +0,0 @@
-{"projectId":"Inference","description":"\nbenchmark.input_shapes.batch_size: 16\nbackend.torch_dtype: float32\nbenchmark.input_shapes.sequence_length: 200\nbenchmark.new_tokens: 200","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14054":0.00457,"14055":0.00609},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14055","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14055}}}
\ No newline at end of file
diff --git a/dana/configs/db/Inference/series/llama_1gpu/_3_forward/_throughpu/t_samples_/s_.json b/dana/configs/db/Inference/series/llama_1gpu/_3_forward/_throughpu/t_samples_/s_.json
deleted file mode 100644
index 18db8f190083baf5f7e83fc8908909bd73593cf6..0000000000000000000000000000000000000000
--- a/dana/configs/db/Inference/series/llama_1gpu/_3_forward/_throughpu/t_samples_/s_.json
+++ /dev/null
@@ -1 +0,0 @@
-{"projectId":"Inference","description":"\nbenchmark.input_shapes.batch_size: 16\nbackend.torch_dtype: float32\nbenchmark.input_shapes.sequence_length: 200\nbenchmark.new_tokens: 200","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14054":3500,"14055":2630},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14055","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14055}}}
\ No newline at end of file
diff --git a/dana/configs/db/Inference/series/llama_1gpu/_3_generat/e_latency_/s_.json b/dana/configs/db/Inference/series/llama_1gpu/_3_generat/e_latency_/s_.json
deleted file mode 100644
index bbc3acf6f3fabb340896486490b62c34cc65bd81..0000000000000000000000000000000000000000
--- a/dana/configs/db/Inference/series/llama_1gpu/_3_generat/e_latency_/s_.json
+++ /dev/null
@@ -1 +0,0 @@
-{"projectId":"Inference","description":"\nbenchmark.input_shapes.batch_size: 16\nbackend.torch_dtype: float32\nbenchmark.input_shapes.sequence_length: 200\nbenchmark.new_tokens: 200","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14054":0.539,"14055":0.822},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14055","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14055}}}
\ No newline at end of file
diff --git a/dana/configs/db/Inference/series/llama_1gpu/_3_generat/e_throughp/ut_tokens_/s_.json b/dana/configs/db/Inference/series/llama_1gpu/_3_generat/e_throughp/ut_tokens_/s_.json
deleted file mode 100644
index 2bd088bf97392cbaeb1436f1a789c9e55d0b60b2..0000000000000000000000000000000000000000
--- a/dana/configs/db/Inference/series/llama_1gpu/_3_generat/e_throughp/ut_tokens_/s_.json
+++ /dev/null
@@ -1 +0,0 @@
-{"projectId":"Inference","description":"\nbenchmark.input_shapes.batch_size: 16\nbackend.torch_dtype: float32\nbenchmark.input_shapes.sequence_length: 200\nbenchmark.new_tokens: 200","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14054":5940,"14055":3890},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14055","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14055}}}
\ No newline at end of file
diff --git a/dana/configs/db/Training/infos/benchmarks.series.json b/dana/configs/db/Training/infos/benchmarks.series.json
deleted file mode 100644
index 386c7303b2fa517a2eec0091af637a41a3d81787..0000000000000000000000000000000000000000
--- a/dana/configs/db/Training/infos/benchmarks.series.json
+++ /dev/null
@@ -1 +0,0 @@
-{"bert_1gpu_0_training_runtime_s_":{"status":{"error":"Unable to find first average","lastBuildId":14055},"description":"\n+benchmark.training_arguments.per_device_train_batch_size: None\nbackend.torch_dtype: float16\nbenchmark.dataset_shapes.sequence_length: 256","state":"similarNeedstriage"},"bert_1gpu_0_training_throughput_samples_s_":{"status":{"error":"Unable to find first average","lastBuildId":14055},"description":"\n+benchmark.training_arguments.per_device_train_batch_size: None\nbackend.torch_dtype: float16\nbenchmark.dataset_shapes.sequence_length: 256","state":"similarNeedstriage"},"bert_1gpu_1_training_runtime_s_":{"status":{"error":"Unable to find first average","lastBuildId":14055},"description":"\n+benchmark.training_arguments.per_device_train_batch_size: None\nbackend.torch_dtype: float32\nbenchmark.dataset_shapes.sequence_length: 256","state":"similarNeedstriage"},"bert_1gpu_1_training_throughput_samples_s_":{"status":{"error":"Unable to find first average","lastBuildId":14055},"description":"\n+benchmark.training_arguments.per_device_train_batch_size: None\nbackend.torch_dtype: float32\nbenchmark.dataset_shapes.sequence_length: 256","state":"similarNeedstriage"}}
\ No newline at end of file
diff --git a/dana/configs/db/Training/infos/benchmarks.statusSeries.json b/dana/configs/db/Training/infos/benchmarks.statusSeries.json
deleted file mode 100644
index 8285f62564e757b6baa9240a3ae0ae96c6c56dba..0000000000000000000000000000000000000000
--- a/dana/configs/db/Training/infos/benchmarks.statusSeries.json
+++ /dev/null
@@ -1 +0,0 @@
-{"0":{"numSeries":4,"numSeriesSimilar":0,"numSeriesImproved":0,"numSeriesRegression":0,"numSeriesUndefined":4,"time":1695816293579}}
\ No newline at end of file
diff --git a/dana/configs/db/Training/infos/builds.json b/dana/configs/db/Training/infos/builds.json
deleted file mode 100644
index dd487443ef60859508543c17e2796cf9a1cb5cf1..0000000000000000000000000000000000000000
--- a/dana/configs/db/Training/infos/builds.json
+++ /dev/null
@@ -1 +0,0 @@
-{"14054":{"buildId":14054,"infos":{"hash":"153755ee386ac73e04814a94337abcb1208ff5d1","abbrevHash":"153755ee","authorName":"Younes Belkada","authorEmail":"49240599+younesbelkada@users.noreply.github.com","subject":"[`FA` / `tests`] Add use_cache tests for FA models (#26415)","url":null}},"14055":{"buildId":14055,"infos":{"hash":"946bac798caefada3f5f1c9fecdcfd587ed24ac7","abbrevHash":"946bac79","authorName":"statelesshz","authorEmail":"hzji210@gmail.com","subject":"add bf16 mixed precision support for NPU (#26163)","url":null}}}
\ No newline at end of file
diff --git a/dana/configs/db/Training/series/bert_1gpu_/0_training/_runtime_s/_.json b/dana/configs/db/Training/series/bert_1gpu_/0_training/_runtime_s/_.json
deleted file mode 100644
index 87926918e2374294e94a2c3c36cad25b5fa99d17..0000000000000000000000000000000000000000
--- a/dana/configs/db/Training/series/bert_1gpu_/0_training/_runtime_s/_.json
+++ /dev/null
@@ -1 +0,0 @@
-{"projectId":"Training","description":"\n+benchmark.training_arguments.per_device_train_batch_size: None\nbackend.torch_dtype: float16\nbenchmark.dataset_shapes.sequence_length: 256","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14054":37.33880257606506,"14055":32.68557357788086},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14055","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14055}}}
\ No newline at end of file
diff --git a/dana/configs/db/Training/series/bert_1gpu_/0_training/_throughpu/t_samples_/s_.json b/dana/configs/db/Training/series/bert_1gpu_/0_training/_throughpu/t_samples_/s_.json
deleted file mode 100644
index dd09c9b81796dcaed65c07aa4167bad19b41e50e..0000000000000000000000000000000000000000
--- a/dana/configs/db/Training/series/bert_1gpu_/0_training/_throughpu/t_samples_/s_.json
+++ /dev/null
@@ -1 +0,0 @@
-{"projectId":"Training","description":"\n+benchmark.training_arguments.per_device_train_batch_size: None\nbackend.torch_dtype: float16\nbenchmark.dataset_shapes.sequence_length: 256","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14054":86.55874792492082,"14055":98.88154455356337},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14055","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14055}}}
\ No newline at end of file
diff --git a/dana/configs/db/Training/series/bert_1gpu_/1_training/_runtime_s/_.json b/dana/configs/db/Training/series/bert_1gpu_/1_training/_runtime_s/_.json
deleted file mode 100644
index 7722e885883546d2a01d2d6c65dd78c3ee26e71d..0000000000000000000000000000000000000000
--- a/dana/configs/db/Training/series/bert_1gpu_/1_training/_runtime_s/_.json
+++ /dev/null
@@ -1 +0,0 @@
-{"projectId":"Training","description":"\n+benchmark.training_arguments.per_device_train_batch_size: None\nbackend.torch_dtype: float32\nbenchmark.dataset_shapes.sequence_length: 256","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14054":13.53417706489563,"14055":13.473761320114136},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14055","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14055}}}
\ No newline at end of file
diff --git a/dana/configs/db/Training/series/bert_1gpu_/1_training/_throughpu/t_samples_/s_.json b/dana/configs/db/Training/series/bert_1gpu_/1_training/_throughpu/t_samples_/s_.json
deleted file mode 100644
index ee2cd9b70645b8059d3ed872a02dfef285483a43..0000000000000000000000000000000000000000
--- a/dana/configs/db/Training/series/bert_1gpu_/1_training/_throughpu/t_samples_/s_.json
+++ /dev/null
@@ -1 +0,0 @@
-{"projectId":"Training","description":"\n+benchmark.training_arguments.per_device_train_batch_size: None\nbackend.torch_dtype: float32\nbenchmark.dataset_shapes.sequence_length: 256","analyse":{"benchmark":{"range":"10%","required":5,"trend":"smaller"}},"assignee":{"compares":{}},"samples":{"14054":238.802845899144,"14055":239.87362720869555},"state":{"analyse":"similarNeedstriage","compares":{}},"lastBuildId":"14055","analyseResult":{"summary":{"error":"Unable to find first average","lastBuildId":14055}}}
\ No newline at end of file
diff --git a/dana/configs/db/admin/globalStats.json b/dana/configs/db/admin/globalStats.json
deleted file mode 100644
index 05940571fe2f74cbbf0464174cfd7b08d1db2127..0000000000000000000000000000000000000000
--- a/dana/configs/db/admin/globalStats.json
+++ /dev/null
@@ -1 +0,0 @@
-{"numSamples":40,"numSeries":20,"projects":{"Inference":{"numSamples":32,"numSeries":16},"Training":{"numSamples":8,"numSeries":4}}} \ No newline at end of file diff --git a/dana/configs/db/admin/projects.json b/dana/configs/db/admin/projects.json deleted file mode 100644 index 202c72e0009103138f1017fe29f9a4c4ce315f2e..0000000000000000000000000000000000000000 --- a/dana/configs/db/admin/projects.json +++ /dev/null @@ -1 +0,0 @@ -{"Inference":{"description":"Benchmarks related to inference","users":"","useBugTracker":false},"Training":{"description":"Benchmarks related to training","users":"","useBugTracker":false}} \ No newline at end of file diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/0/.config/config.yaml b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/0/.config/config.yaml deleted file mode 100644 index 2fa7431d84a1d03788f1e178b58c75b7f01f84a3..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/0/.config/config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 1500 - sequence_length: 256 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: ${is_cpu:${device}} - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 32 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/0/.config/hydra.yaml b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/0/.config/hydra.yaml deleted file mode 100644 index 2919bbefb2db364c1521fe841d5f0b04a8ef1729..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/0/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: 
runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - +benchmark.training_arguments.per_device_train_batch_size: '32' - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? - hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - +benchmark.training_arguments.per_device_train_batch_size=32 - - backend.torch_dtype=float16 - job: - name: experiment - chdir: true - override_dirname: +benchmark.training_arguments.per_device_train_batch_size=32,backend.torch_dtype=float16 - id: '0' - num: 0 - config_name: bert_1gpu_training - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/0 - choices: - benchmark: training - backend: pytorch - 
hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/0/.config/overrides.yaml b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/0/.config/overrides.yaml deleted file mode 100644 index d5e7aac495a5978edf9851ed4029a4f4f4de707b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/0/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- +benchmark.training_arguments.per_device_train_batch_size=32 -- backend.torch_dtype=float16 diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/0/experiment.log b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/0/experiment.log deleted file mode 100644 index 9d3bed7d4bf709187c9c88491deffb3ec63fdacf..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/0/experiment.log +++ /dev/null @@ -1,17 +0,0 @@ -[2023-09-27 11:57:31,928][experiment][WARNING] - Multiple GPUs detected but CUDA_DEVICE_ORDER is not set. This means that code might allocate resources from the wrong GPUs even if CUDA_VISIBLE_DEVICES is set. Pytorch uses the `FASTEST_FIRST` order by default, which is not guaranteed to be the same as nvidia-smi. `CUDA_DEVICE_ORDER` will be set to `PCI_BUS_ID` to ensure that the GPUs are allocated in the same order as nvidia-smi. -[2023-09-27 11:57:34,386][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-27 11:57:34,386][backend][INFO] - Configuring pytorch backend -[2023-09-27 11:57:34,389][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-27 11:57:34,517][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-27 11:57:34,530][pytorch][INFO] - + Loading model on device: cuda -[2023-09-27 11:57:35,285][benchmark][INFO] - Configuring training benchmark -[2023-09-27 11:57:35,286][training][INFO] - Running training benchmark -[2023-09-27 11:57:35,286][dataset_generator][INFO] - Using text-classification task generator -[2023-09-27 11:57:35,335][pytorch][INFO] - + Setting dataset format to `torch`. 
-[2023-09-27 11:57:35,335][pytorch][INFO] - + Wrapping training arguments with transformers.TrainingArguments
-[2023-09-27 11:57:35,337][pytorch][INFO] - + Wrapping model with transformers.Trainer
-[2023-09-27 11:57:35,341][pytorch][INFO] - + Starting training
-[2023-09-27 11:57:55,096][pytorch][INFO] - + Training finished successfully
-[2023-09-27 11:57:55,097][training][INFO] - Saving training results
-[2023-09-27 11:57:55,100][backend][INFO] - Cleaning pytorch backend
-[2023-09-27 11:57:55,100][backend][INFO] - + Deleting pretrained model
diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/0/hydra_config.yaml b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/0/hydra_config.yaml
deleted file mode 100644
index ce5ceabbf670712f423e7b59bda6e78c3d9bf547..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/0/hydra_config.yaml
+++ /dev/null
@@ -1,75 +0,0 @@
-backend:
-  name: pytorch
-  version: 2.1.0+rocm5.6
-  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
-  seed: 42
-  inter_op_num_threads: null
-  intra_op_num_threads: null
-  initial_isolation_check: true
-  continous_isolation_check: true
-  delete_cache: false
-  no_weights: false
-  device_map: null
-  torch_dtype: float16
-  disable_grad: false
-  eval_mode: false
-  amp_autocast: false
-  amp_dtype: null
-  torch_compile: false
-  torch_compile_config: {}
-  bettertransformer: false
-  quantization_scheme: null
-  quantization_config: {}
-  use_ddp: false
-  ddp_config: {}
-  peft_strategy: null
-  peft_config: {}
-benchmark:
-  name: training
-  _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark
-  warmup_steps: 40
-  dataset_shapes:
-    dataset_size: 1500
-    sequence_length: 256
-    num_choices: 1
-    feature_size: 80
-    nb_max_frames: 3000
-    audio_sequence_length: 16000
-  training_arguments:
-    skip_memory_metrics: true
-    output_dir: ./trainer_output
-    use_cpu: false
-    ddp_find_unused_parameters: false
-    do_train: true
-    do_eval: false
-    do_predict: false
-    report_to: none
-    per_device_train_batch_size: 32
-experiment_name: bert_1gpu_training
-model: bert-base-uncased
-device: cuda
-task: text-classification
-hub_kwargs:
-  revision: main
-  cache_dir: null
-  force_download: false
-  local_files_only: false
-environment:
-  optimum_version: 1.13.1
-  transformers_version: 4.34.0.dev0
-  accelerate_version: 0.23.0
-  diffusers_version: null
-  python_version: 3.10.12
-  system: Linux
-  cpu: ' AMD EPYC 7643 48-Core Processor'
-  cpu_count: 96
-  cpu_ram_mb: 1082028
-  gpus:
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/0/training_results.csv b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/0/training_results.csv
deleted file mode 100644
index 6719843b59f06f9c21058e8b7c069b583b801fbb..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/0/training_results.csv
+++ /dev/null
@@ -1,2 +0,0 @@
-warmup.runtime(s),warmup.throughput(samples/s),training.runtime(s),training.throughput(samples/s),overall_training.runtime(s),overall_training.throughput(samples/s)
-6.112920045852661,209.39256368458834,13.53417706489563,238.802845899144,19.647098064422607,164.50266545228763
diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/1/.config/config.yaml b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/1/.config/config.yaml
deleted file mode 100644
index 9481e5a5f6a940d90f8e3eeecf521edb6cba2b5f..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/1/.config/config.yaml
+++ /dev/null
@@ -1,75 +0,0 @@
-backend:
-  name: pytorch
-  version: ${pytorch_version:}
-  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
-  seed: 42
-  inter_op_num_threads: null
-  intra_op_num_threads: null
-  initial_isolation_check: true
-  continous_isolation_check: true
-  delete_cache: false
-  no_weights: false
-  device_map: null
-  torch_dtype: float32
-  disable_grad: ${is_inference:${benchmark.name}}
-  eval_mode: ${is_inference:${benchmark.name}}
-  amp_autocast: false
-  amp_dtype: null
-  torch_compile: false
-  torch_compile_config: {}
-  bettertransformer: false
-  quantization_scheme: null
-  quantization_config: {}
-  use_ddp: false
-  ddp_config: {}
-  peft_strategy: null
-  peft_config: {}
-benchmark:
-  name: training
-  _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark
-  warmup_steps: 40
-  dataset_shapes:
-    dataset_size: 1500
-    sequence_length: 256
-    num_choices: 1
-    feature_size: 80
-    nb_max_frames: 3000
-    audio_sequence_length: 16000
-  training_arguments:
-    skip_memory_metrics: true
-    output_dir: ./trainer_output
-    use_cpu: ${is_cpu:${device}}
-    ddp_find_unused_parameters: false
-    do_train: true
-    do_eval: false
-    do_predict: false
-    report_to: none
-    per_device_train_batch_size: 32
-experiment_name: bert_1gpu_training
-model: bert-base-uncased
-device: cuda
-task: text-classification
-hub_kwargs:
-  revision: main
-  cache_dir: null
-  force_download: false
-  local_files_only: false
-environment:
-  optimum_version: 1.13.1
-  transformers_version: 4.34.0.dev0
-  accelerate_version: 0.23.0
-  diffusers_version: null
-  python_version: 3.10.12
-  system: Linux
-  cpu: ' AMD EPYC 7643 48-Core Processor'
-  cpu_count: 96
-  cpu_ram_mb: 1082028
-  gpus:
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/1/.config/hydra.yaml b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/1/.config/hydra.yaml
deleted file mode 100644
index 4fbe01adebafa2078ce70844dc68c57b545b9dc9..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/1/.config/hydra.yaml
+++ /dev/null
@@ -1,174 +0,0 @@
-hydra:
-  run:
-    dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
-  sweep:
-    dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
-    subdir: ${hydra.job.num}
-  launcher:
-    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
-  sweeper:
-    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
-    max_batch_size: null
-    params:
-      +benchmark.training_arguments.per_device_train_batch_size: '32'
-      backend.torch_dtype: float16,float32
-  help:
-    app_name: ${hydra.job.name}
-    header: '${hydra.help.app_name} is powered by Hydra.
-
-      '
-    footer: 'Powered by Hydra (https://hydra.cc)
-
-      Use --hydra-help to view Hydra specific help
-
-      '
-    template: '${hydra.help.header}
-
-      == Configuration groups ==
-
-      Compose your configuration from those groups (group=option)
-
-
-      $APP_CONFIG_GROUPS
-
-
-      == Config ==
-
-      Override anything in the config (foo.bar=value)
-
-
-      $CONFIG
-
-
-      ${hydra.help.footer}
-
-      '
-  hydra_help:
-    template: 'Hydra (${hydra.runtime.version})
-
-      See https://hydra.cc for more info.
-
-
-      == Flags ==
-
-      $FLAGS_HELP
-
-
-      == Configuration groups ==
-
-      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
-      to command line)
-
-
-      $HYDRA_CONFIG_GROUPS
-
-
-      Use ''--cfg hydra'' to Show the Hydra config.
-
-      '
-    hydra_help: ???
-  hydra_logging:
-    version: 1
-    formatters:
-      colorlog:
-        (): colorlog.ColoredFormatter
-        format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
-    handlers:
-      console:
-        class: logging.StreamHandler
-        formatter: colorlog
-        stream: ext://sys.stdout
-    root:
-      level: INFO
-      handlers:
-      - console
-    disable_existing_loggers: false
-  job_logging:
-    version: 1
-    formatters:
-      simple:
-        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
-      colorlog:
-        (): colorlog.ColoredFormatter
-        format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
-          - %(message)s'
-        log_colors:
-          DEBUG: purple
-          INFO: green
-          WARNING: yellow
-          ERROR: red
-          CRITICAL: red
-    handlers:
-      console:
-        class: logging.StreamHandler
-        formatter: colorlog
-        stream: ext://sys.stdout
-      file:
-        class: logging.FileHandler
-        formatter: simple
-        filename: ${hydra.job.name}.log
-    root:
-      level: INFO
-      handlers:
-      - console
-      - file
-    disable_existing_loggers: false
-  env: {}
-  mode: MULTIRUN
-  searchpath: []
-  callbacks: {}
-  output_subdir: .hydra
-  overrides:
-    hydra:
-    - hydra.mode=MULTIRUN
-    task:
-    - +benchmark.training_arguments.per_device_train_batch_size=32
-    - backend.torch_dtype=float32
-  job:
-    name: experiment
-    chdir: true
-    override_dirname: +benchmark.training_arguments.per_device_train_batch_size=32,backend.torch_dtype=float32
-    id: '1'
-    num: 1
-    config_name: bert_1gpu_training
-    env_set: {}
-    env_copy: []
-    config:
-      override_dirname:
-        kv_sep: '='
-        item_sep: ','
-        exclude_keys: []
-  runtime:
-    version: 1.3.2
-    version_base: '1.3'
-    cwd: /home/user/transformers-regression
-    config_sources:
-    - path: hydra.conf
-      schema: pkg
-      provider: hydra
-    - path: optimum_benchmark
-      schema: pkg
-      provider: main
-    - path: hydra_plugins.hydra_colorlog.conf
-      schema: pkg
-      provider: hydra-colorlog
-    - path: /home/user/transformers-regression/configs
-      schema: file
-      provider: command-line
-    - path: ''
-      schema: structured
-      provider: schema
-    output_dir: /home/user/transformers-regression/sweeps/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/1
-    choices:
-      benchmark: training
-      backend: pytorch
-      hydra/env: default
-      hydra/callbacks: null
-      hydra/job_logging: colorlog
-      hydra/hydra_logging: colorlog
-      hydra/hydra_help: default
-      hydra/help: default
-      hydra/sweeper: basic
-      hydra/launcher: basic
-      hydra/output: default
-  verbose: false
diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/1/.config/overrides.yaml b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/1/.config/overrides.yaml
deleted file mode 100644
index f92a32ab90ab68f9e88427057fcb5ef3cbec936b..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/1/.config/overrides.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-- +benchmark.training_arguments.per_device_train_batch_size=32
-- backend.torch_dtype=float32
diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/1/experiment.log b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/1/experiment.log
deleted file mode 100644
index 908a5bb2a5e48ecba0fee5a8097b28a67177749f..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/1/experiment.log
+++ /dev/null
@@ -1,16 +0,0 @@
-[2023-09-27 11:57:56,611][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert
-[2023-09-27 11:57:56,612][backend][INFO] - Configuring pytorch backend
-[2023-09-27 11:57:56,612][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0]
-[2023-09-27 11:57:56,733][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0]
-[2023-09-27 11:57:56,749][pytorch][INFO] - + Loading model on device: cuda
-[2023-09-27 11:57:57,432][benchmark][INFO] - Configuring training benchmark
-[2023-09-27 11:57:57,432][training][INFO] - Running training benchmark
-[2023-09-27 11:57:57,433][dataset_generator][INFO] - Using text-classification task generator
-[2023-09-27 11:57:57,467][pytorch][INFO] - + Setting dataset format to `torch`.
-[2023-09-27 11:57:57,468][pytorch][INFO] - + Wrapping training arguments with transformers.TrainingArguments
-[2023-09-27 11:57:57,469][pytorch][INFO] - + Wrapping model with transformers.Trainer
-[2023-09-27 11:57:57,474][pytorch][INFO] - + Starting training
-[2023-09-27 11:58:50,280][pytorch][INFO] - + Training finished successfully
-[2023-09-27 11:58:50,281][training][INFO] - Saving training results
-[2023-09-27 11:58:50,282][backend][INFO] - Cleaning pytorch backend
-[2023-09-27 11:58:50,282][backend][INFO] - + Deleting pretrained model
diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/1/hydra_config.yaml b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/1/hydra_config.yaml
deleted file mode 100644
index aaf5d59736c1ac950d8013078b3a4cec11901087..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/1/hydra_config.yaml
+++ /dev/null
@@ -1,75 +0,0 @@
-backend:
-  name: pytorch
-  version: 2.1.0+rocm5.6
-  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
-  seed: 42
-  inter_op_num_threads: null
-  intra_op_num_threads: null
-  initial_isolation_check: true
-  continous_isolation_check: true
-  delete_cache: false
-  no_weights: false
-  device_map: null
-  torch_dtype: float32
-  disable_grad: false
-  eval_mode: false
-  amp_autocast: false
-  amp_dtype: null
-  torch_compile: false
-  torch_compile_config: {}
-  bettertransformer: false
-  quantization_scheme: null
-  quantization_config: {}
-  use_ddp: false
-  ddp_config: {}
-  peft_strategy: null
-  peft_config: {}
-benchmark:
-  name: training
-  _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark
-  warmup_steps: 40
-  dataset_shapes:
-    dataset_size: 1500
-    sequence_length: 256
-    num_choices: 1
-    feature_size: 80
-    nb_max_frames: 3000
-    audio_sequence_length: 16000
-  training_arguments:
-    skip_memory_metrics: true
-    output_dir: ./trainer_output
-    use_cpu: false
-    ddp_find_unused_parameters: false
-    do_train: true
-    do_eval: false
-    do_predict: false
-    report_to: none
-    per_device_train_batch_size: 32
-experiment_name: bert_1gpu_training
-model: bert-base-uncased
-device: cuda
-task: text-classification
-hub_kwargs:
-  revision: main
-  cache_dir: null
-  force_download: false
-  local_files_only: false
-environment:
-  optimum_version: 1.13.1
-  transformers_version: 4.34.0.dev0
-  accelerate_version: 0.23.0
-  diffusers_version: null
-  python_version: 3.10.12
-  system: Linux
-  cpu: ' AMD EPYC 7643 48-Core Processor'
-  cpu_count: 96
-  cpu_ram_mb: 1082028
-  gpus:
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/1/training_results.csv b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/1/training_results.csv
deleted file mode 100644
index 3aab45848bc777351fa3fc54de9674d7c7b0a758..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/1/training_results.csv
+++ /dev/null
@@ -1,2 +0,0 @@
-warmup.runtime(s),warmup.throughput(samples/s),training.runtime(s),training.throughput(samples/s),overall_training.runtime(s),overall_training.throughput(samples/s)
-15.344141006469727,83.41946280735421,37.33880257606506,86.55874792492082,52.682945013046265,61.34812697353263
diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/multirun.yaml b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/multirun.yaml
deleted file mode 100644
index 7ec27559618fc6bbc0449f581cfb639b6b37f40f..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/bert_1gpu_training/multirun.yaml
+++ /dev/null
@@ -1,246 +0,0 @@
-hydra:
-  run:
-    dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
-  sweep:
-    dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
-    subdir: ${hydra.job.num}
-  launcher:
-    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
-  sweeper:
-    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
-    max_batch_size: null
-    params:
-      +benchmark.training_arguments.per_device_train_batch_size: '32'
-      backend.torch_dtype: float16,float32
-  help:
-    app_name: ${hydra.job.name}
-    header: '${hydra.help.app_name} is powered by Hydra.
-
-      '
-    footer: 'Powered by Hydra (https://hydra.cc)
-
-      Use --hydra-help to view Hydra specific help
-
-      '
-    template: '${hydra.help.header}
-
-      == Configuration groups ==
-
-      Compose your configuration from those groups (group=option)
-
-
-      $APP_CONFIG_GROUPS
-
-
-      == Config ==
-
-      Override anything in the config (foo.bar=value)
-
-
-      $CONFIG
-
-
-      ${hydra.help.footer}
-
-      '
-  hydra_help:
-    template: 'Hydra (${hydra.runtime.version})
-
-      See https://hydra.cc for more info.
-
-
-      == Flags ==
-
-      $FLAGS_HELP
-
-
-      == Configuration groups ==
-
-      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
-      to command line)
-
-
-      $HYDRA_CONFIG_GROUPS
-
-
-      Use ''--cfg hydra'' to Show the Hydra config.
-
-      '
-    hydra_help: ???
-  hydra_logging:
-    version: 1
-    formatters:
-      colorlog:
-        (): colorlog.ColoredFormatter
-        format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
-    handlers:
-      console:
-        class: logging.StreamHandler
-        formatter: colorlog
-        stream: ext://sys.stdout
-    root:
-      level: INFO
-      handlers:
-      - console
-    disable_existing_loggers: false
-  job_logging:
-    version: 1
-    formatters:
-      simple:
-        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
-      colorlog:
-        (): colorlog.ColoredFormatter
-        format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
-          - %(message)s'
-        log_colors:
-          DEBUG: purple
-          INFO: green
-          WARNING: yellow
-          ERROR: red
-          CRITICAL: red
-    handlers:
-      console:
-        class: logging.StreamHandler
-        formatter: colorlog
-        stream: ext://sys.stdout
-      file:
-        class: logging.FileHandler
-        formatter: simple
-        filename: ${hydra.job.name}.log
-    root:
-      level: INFO
-      handlers:
-      - console
-      - file
-    disable_existing_loggers: false
-  env: {}
-  mode: MULTIRUN
-  searchpath: []
-  callbacks: {}
-  output_subdir: .hydra
-  overrides:
-    hydra:
-    - hydra.mode=MULTIRUN
-    task: []
-  job:
-    name: experiment
-    chdir: true
-    override_dirname: ''
-    id: ???
-    num: ???
-    config_name: bert_1gpu_training
-    env_set: {}
-    env_copy: []
-    config:
-      override_dirname:
-        kv_sep: '='
-        item_sep: ','
-        exclude_keys: []
-  runtime:
-    version: 1.3.2
-    version_base: '1.3'
-    cwd: /home/user/transformers-regression
-    config_sources:
-    - path: hydra.conf
-      schema: pkg
-      provider: hydra
-    - path: optimum_benchmark
-      schema: pkg
-      provider: main
-    - path: hydra_plugins.hydra_colorlog.conf
-      schema: pkg
-      provider: hydra-colorlog
-    - path: /home/user/transformers-regression/configs
-      schema: file
-      provider: command-line
-    - path: ''
-      schema: structured
-      provider: schema
-    output_dir: ???
- choices: - benchmark: training - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 1500 - sequence_length: 256 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: ${is_cpu:${device}} - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/0/.config/config.yaml b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/0/.config/config.yaml deleted file mode 100644 index ec0ef3c027bbeee0cbf48e5389c23fee9802c7b2..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/0/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 10 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 1 - sequence_length: 200 - num_choices: 1 - 
feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: ${infer_task:${model}} -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/0/.config/hydra.yaml b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/0/.config/hydra.yaml deleted file mode 100644 index 8af24917c173b63b229442257c640a92ad84c047..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,16 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
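Note: the `basic_sweeper` `params` block above is what produced the four numbered run directories (0-3) in this deleted tree: Hydra expands the comma-separated value lists as a Cartesian product, one job per combination. A minimal illustration of that expansion (plain Python; the enumeration order matches the overrides recorded in each run's `hydra.yaml`):

```python
from itertools import product

batch_sizes = (1, 16)            # benchmark.input_shapes.batch_size: 1,16
dtypes = ("float16", "float32")  # backend.torch_dtype: float16,float32

# Jobs 0..3 correspond to the sweep subdirectories 0..3 in this diff.
for num, (bs, dtype) in enumerate(product(batch_sizes, dtypes)):
    print(num, f"benchmark.input_shapes.batch_size={bs}", f"backend.torch_dtype={dtype}")
```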
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - - backend.torch_dtype=float16 - job: - name: experiment - chdir: true - override_dirname: backend.torch_dtype=float16,benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: llama2_1gpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/0/.config/overrides.yaml b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/0/.config/overrides.yaml deleted file mode 100644 index b7fc5900f179157ac0449016dd8d9497ffd58db4..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/0/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- benchmark.input_shapes.batch_size=1 -- backend.torch_dtype=float16 diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/0/experiment.log b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/0/experiment.log deleted file mode 100644 index 0f8d15919c93fea44e88fefa7c6472c9a6f80569..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/0/experiment.log +++ /dev/null @@ -1,27 +0,0 @@ -[2023-09-27 11:58:54,429][inference][INFO] - `new_tokens` was 
set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200. -[2023-09-27 11:58:54,571][experiment][WARNING] - Multiple GPUs detected but CUDA_DEVICE_ORDER is not set. This means that code might allocate resources from the wrong GPUs even if CUDA_VISIBLE_DEVICES is set. Pytorch uses the `FASTEST_FIRST` order by default, which is not guaranteed to be the same as nvidia-smi. `CUDA_DEVICE_ORDER` will be set to `PCI_BUS_ID` to ensure that the GPUs are allocated in the same order as nvidia-smi. -[2023-09-27 11:58:56,806][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-09-27 11:58:56,806][backend][INFO] - Configuring pytorch backend -[2023-09-27 11:58:56,807][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-27 11:58:56,931][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-27 11:58:56,946][pytorch][INFO] - + Disabling gradients -[2023-09-27 11:58:56,947][pytorch][INFO] - + Loading model on device: cuda -[2023-09-27 11:58:57,576][pytorch][INFO] - + Turning on model's eval mode -[2023-09-27 11:58:57,583][benchmark][INFO] - Configuring inference benchmark -[2023-09-27 11:58:57,584][inference][INFO] - Running inference benchmark -[2023-09-27 11:58:57,584][input_generator][INFO] - Using llama model type generator -[2023-09-27 11:58:57,606][inference][INFO] - + Preparing input for the forward pass -[2023-09-27 11:58:57,606][inference][INFO] - + Warming up the forward pass -[2023-09-27 11:58:57,936][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-27 11:58:57,937][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-27 11:59:08,120][inference][INFO] - + Forward pass latency: 3.28e-03 (s) -[2023-09-27 11:59:08,122][inference][INFO] - + Forward pass throughput: 305.00 (samples/s) -[2023-09-27 11:59:08,122][inference][INFO] - + Preparing input for the generation pass -[2023-09-27 11:59:08,122][inference][INFO] - + Warming up the generation pass -[2023-09-27 11:59:09,154][inference][INFO] - + Tracking generation latency and throughput -[2023-09-27 11:59:09,154][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-27 11:59:19,765][inference][INFO] - + Generation pass latency: 5.30e-01 (s) -[2023-09-27 11:59:19,766][inference][INFO] - + Generation pass throughput: 377.00 (tokens/s) -[2023-09-27 11:59:19,766][inference][INFO] - Saving inference results -[2023-09-27 11:59:19,773][backend][INFO] - Cleaning pytorch backend -[2023-09-27 11:59:19,773][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/0/hydra_config.yaml b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/0/hydra_config.yaml deleted file mode 100644 index 1b2c1057cf26f3ea842d62d062d093cc577ef846..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/0/hydra_config.yaml +++ /dev/null @@ -1,79 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - 
torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 10 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 1 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: false - can_generate: true - forward_kwargs: {} - generate_kwargs: - max_new_tokens: 200 - min_new_tokens: 200 - do_sample: false - use_cache: true - pad_token_id: 0 - num_beams: 1 -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/0/inference_results.csv b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/0/inference_results.csv deleted file mode 100644 index 6ce4f1566393fab67c83cd7d069b8076c9870f94..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0.00328,305.0,0.53,377.0 diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/1/.config/config.yaml b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/1/.config/config.yaml deleted file mode 100644 index 8bddc839f46a99f0877837da7c1db809c4ff56f6..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/1/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 10 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 1 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - 
can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: ${infer_task:${model}} -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/1/.config/hydra.yaml b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/1/.config/hydra.yaml deleted file mode 100644 index f468dee36d9f7cf6e13baaed4a8502ad0dea845d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,16 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
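Note: the unresolved `${...}` entries in the `.config/config.yaml` files (`${pytorch_version:}`, `${is_inference:${benchmark.name}}`, `${infer_task:${model}}`, ...) are OmegaConf resolver calls registered by the benchmark harness before the config is composed; the `hydra_config.yaml` files in the same run directories hold the resolved values. A sketch of the mechanism (the resolver bodies below are illustrative assumptions, not optimum-benchmark's actual implementations):

```python
from omegaconf import OmegaConf

# Illustrative resolvers; the real ones live inside optimum_benchmark.
OmegaConf.register_new_resolver("is_inference", lambda name: name == "inference")
OmegaConf.register_new_resolver("pytorch_version", lambda: "2.1.0+rocm5.6")

cfg = OmegaConf.create({
    "benchmark": {"name": "inference"},
    "backend": {
        "version": "${pytorch_version:}",
        "eval_mode": "${is_inference:${benchmark.name}}",
    },
})
print(OmegaConf.to_container(cfg, resolve=True))
# {'benchmark': {'name': 'inference'},
#  'backend': {'version': '2.1.0+rocm5.6', 'eval_mode': True}}
```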
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - - backend.torch_dtype=float32 - job: - name: experiment - chdir: true - override_dirname: backend.torch_dtype=float32,benchmark.input_shapes.batch_size=1 - id: '1' - num: 1 - config_name: llama2_1gpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/1/.config/overrides.yaml b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/1/.config/overrides.yaml deleted file mode 100644 index 8b4741e9eb919dcf02db7f865f28d92490b262b0..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/1/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- benchmark.input_shapes.batch_size=1 -- backend.torch_dtype=float32 diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/1/experiment.log b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/1/experiment.log deleted file mode 100644 index c9e1360a90bce2fee7b7e3c33f22c363b124e354..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/1/experiment.log +++ /dev/null @@ -1,26 +0,0 @@ -[2023-09-27 11:59:20,394][inference][INFO] - `new_tokens` was 
set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200. -[2023-09-27 11:59:21,571][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-09-27 11:59:21,571][backend][INFO] - Configuring pytorch backend -[2023-09-27 11:59:21,572][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-27 11:59:21,699][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-27 11:59:21,716][pytorch][INFO] - + Disabling gradients -[2023-09-27 11:59:21,717][pytorch][INFO] - + Loading model on device: cuda -[2023-09-27 11:59:22,228][pytorch][INFO] - + Turning on model's eval mode -[2023-09-27 11:59:22,229][benchmark][INFO] - Configuring inference benchmark -[2023-09-27 11:59:22,229][inference][INFO] - Running inference benchmark -[2023-09-27 11:59:22,229][input_generator][INFO] - Using llama model type generator -[2023-09-27 11:59:22,230][inference][INFO] - + Preparing input for the forward pass -[2023-09-27 11:59:22,230][inference][INFO] - + Warming up the forward pass -[2023-09-27 11:59:22,359][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-27 11:59:22,359][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-27 11:59:32,649][inference][INFO] - + Forward pass latency: 2.39e-03 (s) -[2023-09-27 11:59:32,652][inference][INFO] - + Forward pass throughput: 418.00 (samples/s) -[2023-09-27 11:59:32,652][inference][INFO] - + Preparing input for the generation pass -[2023-09-27 11:59:32,652][inference][INFO] - + Warming up the generation pass -[2023-09-27 11:59:33,274][inference][INFO] - + Tracking generation latency and throughput -[2023-09-27 11:59:33,274][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-27 11:59:43,578][inference][INFO] - + Generation pass latency: 4.91e-01 (s) -[2023-09-27 11:59:43,578][inference][INFO] - + Generation pass throughput: 407.00 (tokens/s) -[2023-09-27 11:59:43,579][inference][INFO] - Saving inference results -[2023-09-27 11:59:43,585][backend][INFO] - Cleaning pytorch backend -[2023-09-27 11:59:43,585][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/1/hydra_config.yaml b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/1/hydra_config.yaml deleted file mode 100644 index 8cc3fc6127bd10e590231e5c7e6ff30f8937441d..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/1/hydra_config.yaml +++ /dev/null @@ -1,79 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 10 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 1 - sequence_length: 200 - 
num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: false - can_generate: true - forward_kwargs: {} - generate_kwargs: - max_new_tokens: 200 - min_new_tokens: 200 - do_sample: false - use_cache: true - pad_token_id: 0 - num_beams: 1 -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/1/inference_results.csv b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/1/inference_results.csv deleted file mode 100644 index a38113164d14a32deb93f1a320c8844114291638..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/1/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0.00239,418.0,0.491,407.0 diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/2/.config/config.yaml b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/2/.config/config.yaml deleted file mode 100644 index e808137260c93f3a5e79a7488d8c77c535b5afb8..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/2/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 10 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 16 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: ${infer_task:${model}} -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - 
accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/2/.config/hydra.yaml b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/2/.config/hydra.yaml deleted file mode 100644 index ece37f54d937729f90741487a5db30470a86fcdd..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/2/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,16 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
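Note: the resolved `hydra_config.yaml` files show how `new_tokens: 200` is lowered into concrete `generate_kwargs`, exactly as the `experiment.log` lines announce ("`max_new_tokens` and `min_new_tokens` will be set to 200."). Reproducing one generation pass outside the harness would look roughly like this (a sketch; the prompt construction and device handling are my assumptions):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "fxmarty/tiny-llama-fast-tokenizer"  # model from the deleted configs
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

inputs = tokenizer("hello world", return_tensors="pt").to("cuda")
# Mirrors the generate_kwargs recorded above: exactly 200 new tokens, greedy, KV cache on.
output = model.generate(
    **inputs,
    max_new_tokens=200,
    min_new_tokens=200,
    do_sample=False,
    use_cache=True,
    num_beams=1,
    pad_token_id=0,
)
```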
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=16 - - backend.torch_dtype=float16 - job: - name: experiment - chdir: true - override_dirname: backend.torch_dtype=float16,benchmark.input_shapes.batch_size=16 - id: '2' - num: 2 - config_name: llama2_1gpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/2 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/2/.config/overrides.yaml b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/2/.config/overrides.yaml deleted file mode 100644 index 67f6580c5dd56716f1c23e4e90698198799b499f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/2/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- benchmark.input_shapes.batch_size=16 -- backend.torch_dtype=float16 diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/2/experiment.log b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/2/experiment.log deleted file mode 100644 index a75ad8d76406eb6f99c6446298789fea3dcc001f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/2/experiment.log +++ /dev/null @@ -1,26 +0,0 @@ -[2023-09-27 11:59:44,233][inference][INFO] - `new_tokens` 
was set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200. -[2023-09-27 11:59:45,152][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-09-27 11:59:45,152][backend][INFO] - Configuring pytorch backend -[2023-09-27 11:59:45,152][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-27 11:59:45,279][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-27 11:59:45,298][pytorch][INFO] - + Disabling gradients -[2023-09-27 11:59:45,299][pytorch][INFO] - + Loading model on device: cuda -[2023-09-27 11:59:45,702][pytorch][INFO] - + Turning on model's eval mode -[2023-09-27 11:59:45,703][benchmark][INFO] - Configuring inference benchmark -[2023-09-27 11:59:45,703][inference][INFO] - Running inference benchmark -[2023-09-27 11:59:45,703][input_generator][INFO] - Using llama model type generator -[2023-09-27 11:59:45,704][inference][INFO] - + Preparing input for the forward pass -[2023-09-27 11:59:45,704][inference][INFO] - + Warming up the forward pass -[2023-09-27 11:59:45,736][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-27 11:59:45,736][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-27 11:59:55,966][inference][INFO] - + Forward pass latency: 4.57e-03 (s) -[2023-09-27 11:59:55,968][inference][INFO] - + Forward pass throughput: 3500.00 (samples/s) -[2023-09-27 11:59:55,968][inference][INFO] - + Preparing input for the generation pass -[2023-09-27 11:59:55,968][inference][INFO] - + Warming up the generation pass -[2023-09-27 11:59:57,263][inference][INFO] - + Tracking generation latency and throughput -[2023-09-27 11:59:57,264][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-27 12:00:07,504][inference][INFO] - + Generation pass latency: 5.39e-01 (s) -[2023-09-27 12:00:07,505][inference][INFO] - + Generation pass throughput: 5940.00 (tokens/s) -[2023-09-27 12:00:07,505][inference][INFO] - Saving inference results -[2023-09-27 12:00:07,509][backend][INFO] - Cleaning pytorch backend -[2023-09-27 12:00:07,510][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/2/hydra_config.yaml b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/2/hydra_config.yaml deleted file mode 100644 index 6d5005c942d252f9a4063f3d1220104f6cf8d9f3..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/2/hydra_config.yaml +++ /dev/null @@ -1,79 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 10 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 16 - sequence_length: 
200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: false - can_generate: true - forward_kwargs: {} - generate_kwargs: - max_new_tokens: 200 - min_new_tokens: 200 - do_sample: false - use_cache: true - pad_token_id: 0 - num_beams: 1 -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/2/inference_results.csv b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/2/inference_results.csv deleted file mode 100644 index cbbefcbbbebd56ee0a7e63d4828378a2ebda3dd2..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/2/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0.00457,3500.0,0.539,5940.0 diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/3/.config/config.yaml b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/3/.config/config.yaml deleted file mode 100644 index 8c1798c7fccfef2f037edd3ad9cd17c6f4f89168..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/3/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 10 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 16 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: ${infer_task:${model}} -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - 
accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/3/.config/hydra.yaml b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/3/.config/hydra.yaml deleted file mode 100644 index a9109f44db12d1db36f2115eee21f23b9a1ec813..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/3/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,16 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
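Note: per the `benchmark` block, each measured pass first runs `warmup_runs: 10` untimed calls and then repeats the timed call for roughly `duration: 10` seconds; the "+ Tracking ... latency and throughput" log lines bracket exactly that ~10 s window. A minimal sketch of such fixed-duration latency tracking (my reconstruction, not optimum_benchmark's actual tracker, which additionally pins and monitors specific CUDA devices):

```python
import statistics
import time

def track_mean_latency(fn, duration_s=10.0, warmup_runs=10):
    """Warm up, then time repeated calls to fn for a fixed wall-clock budget."""
    for _ in range(warmup_runs):
        fn()
    latencies, start = [], time.perf_counter()
    while time.perf_counter() - start < duration_s:
        t0 = time.perf_counter()
        fn()  # for GPU work, a device synchronize belongs before reading the clock
        latencies.append(time.perf_counter() - t0)
    return statistics.mean(latencies)
```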
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=16 - - backend.torch_dtype=float32 - job: - name: experiment - chdir: true - override_dirname: backend.torch_dtype=float32,benchmark.input_shapes.batch_size=16 - id: '3' - num: 3 - config_name: llama2_1gpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/3 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/3/.config/overrides.yaml b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/3/.config/overrides.yaml deleted file mode 100644 index 9d69bb0ed87b5cb93e4c4e5d5b820de170746b79..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/3/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- benchmark.input_shapes.batch_size=16 -- backend.torch_dtype=float32 diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/3/experiment.log b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/3/experiment.log deleted file mode 100644 index 60ba1967e34060dd9c1b9d4e71a6f0d740d116df..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/3/experiment.log +++ /dev/null @@ -1,26 +0,0 @@ -[2023-09-27 12:00:08,152][inference][INFO] - `new_tokens` 
was set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200. -[2023-09-27 12:00:09,036][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-09-27 12:00:09,037][backend][INFO] - Configuring pytorch backend -[2023-09-27 12:00:09,037][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-27 12:00:09,156][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-27 12:00:09,177][pytorch][INFO] - + Disabling gradients -[2023-09-27 12:00:09,178][pytorch][INFO] - + Loading model on device: cuda -[2023-09-27 12:00:09,560][pytorch][INFO] - + Turning on model's eval mode -[2023-09-27 12:00:09,561][benchmark][INFO] - Configuring inference benchmark -[2023-09-27 12:00:09,561][inference][INFO] - Running inference benchmark -[2023-09-27 12:00:09,561][input_generator][INFO] - Using llama model type generator -[2023-09-27 12:00:09,562][inference][INFO] - + Preparing input for the forward pass -[2023-09-27 12:00:09,562][inference][INFO] - + Warming up the forward pass -[2023-09-27 12:00:09,587][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-27 12:00:09,588][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-27 12:00:19,728][inference][INFO] - + Forward pass latency: 4.10e-03 (s) -[2023-09-27 12:00:19,729][inference][INFO] - + Forward pass throughput: 3900.00 (samples/s) -[2023-09-27 12:00:19,730][inference][INFO] - + Preparing input for the generation pass -[2023-09-27 12:00:19,730][inference][INFO] - + Warming up the generation pass -[2023-09-27 12:00:20,353][inference][INFO] - + Tracking generation latency and throughput -[2023-09-27 12:00:20,353][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-27 12:00:30,827][inference][INFO] - + Generation pass latency: 6.55e-01 (s) -[2023-09-27 12:00:30,827][inference][INFO] - + Generation pass throughput: 4890.00 (tokens/s) -[2023-09-27 12:00:30,827][inference][INFO] - Saving inference results -[2023-09-27 12:00:30,832][backend][INFO] - Cleaning pytorch backend -[2023-09-27 12:00:30,832][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/3/hydra_config.yaml b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/3/hydra_config.yaml deleted file mode 100644 index 53ab39b8c17b78cbdb9aeaa4950e44666a186692..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/3/hydra_config.yaml +++ /dev/null @@ -1,79 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 10 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 16 - sequence_length: 
200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: false - can_generate: true - forward_kwargs: {} - generate_kwargs: - max_new_tokens: 200 - min_new_tokens: 200 - do_sample: false - use_cache: true - pad_token_id: 0 - num_beams: 1 -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/3/inference_results.csv b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/3/inference_results.csv deleted file mode 100644 index 3f1065d35421b059dce56ca6e181c81476681466..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/3/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0.0041,3900.0,0.655,4890.0 diff --git a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/multirun.yaml b/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/multirun.yaml deleted file mode 100644 index 7c81a1546dd4da5a94a657b8233d32ea068d43dc..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:21:54_153755ee386ac73e04814a94337abcb1208ff5d1/llama_1gpu_inference/multirun.yaml +++ /dev/null @@ -1,245 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,16 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
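Note: across the four deleted `inference_results.csv` files, the throughput columns follow directly from latency and shape: forward.throughput ≈ batch_size / forward.latency and generate.throughput ≈ batch_size × new_tokens / generate.latency, with everything reported to about three significant figures (so recomputed values land within rounding of the recorded ones). A quick check against the recorded rows:

```python
# (batch_size, forward.latency(s), forward.throughput, generate.latency(s), generate.throughput)
rows = [
    (1,  0.00328,  305.0, 0.530,  377.0),  # run 0: fp16, bs=1
    (1,  0.00239,  418.0, 0.491,  407.0),  # run 1: fp32, bs=1
    (16, 0.00457, 3500.0, 0.539, 5940.0),  # run 2: fp16, bs=16
    (16, 0.0041,  3900.0, 0.655, 4890.0),  # run 3: fp32, bs=16
]
new_tokens = 200
for bs, f_lat, f_tp, g_lat, g_tp in rows:
    print(f"{bs / f_lat:8.0f} vs {f_tp:6.0f} | {bs * new_tokens / g_lat:8.0f} vs {g_tp:6.0f}")
```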
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: experiment - chdir: true - override_dirname: '' - id: ??? - num: ??? - config_name: llama2_1gpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: ??? 
- choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 10 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 2 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: ${infer_task:${model}} -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/0/.config/config.yaml b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/0/.config/config.yaml deleted file mode 100644 index 2fa7431d84a1d03788f1e178b58c75b7f01f84a3..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/0/.config/config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 1500 - sequence_length: 256 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - 
training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: ${is_cpu:${device}} - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 32 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/0/.config/hydra.yaml b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/0/.config/hydra.yaml deleted file mode 100644 index 06b8786a50d79c8f56d1d3a0e5dc71b69f4e72b7..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/0/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - +benchmark.training_arguments.per_device_train_batch_size: '32' - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - +benchmark.training_arguments.per_device_train_batch_size=32 - - backend.torch_dtype=float16 - job: - name: experiment - chdir: true - override_dirname: +benchmark.training_arguments.per_device_train_batch_size=32,backend.torch_dtype=float16 - id: '0' - num: 0 - config_name: bert_1gpu_training - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/0 - choices: - benchmark: training - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/0/.config/overrides.yaml b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/0/.config/overrides.yaml deleted file mode 100644 index d5e7aac495a5978edf9851ed4029a4f4f4de707b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/0/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- +benchmark.training_arguments.per_device_train_batch_size=32 -- backend.torch_dtype=float16 diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/0/experiment.log b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/0/experiment.log deleted file mode 100644 index ebe4fd74077942fe7ded430c4796ad312bfd57c2..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/0/experiment.log +++ /dev/null @@ -1,17 +0,0 @@ 
-[2023-09-27 12:00:57,716][experiment][WARNING] - Multiple GPUs detected but CUDA_DEVICE_ORDER is not set. This means that code might allocate resources from the wrong GPUs even if CUDA_VISIBLE_DEVICES is set. Pytorch uses the `FASTEST_FIRST` order by default, which is not guaranteed to be the same as nvidia-smi. `CUDA_DEVICE_ORDER` will be set to `PCI_BUS_ID` to ensure that the GPUs are allocated in the same order as nvidia-smi. -[2023-09-27 12:01:00,376][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-27 12:01:00,376][backend][INFO] - Configuring pytorch backend -[2023-09-27 12:01:00,379][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-27 12:01:00,505][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-27 12:01:00,518][pytorch][INFO] - + Loading model on device: cuda -[2023-09-27 12:01:01,286][benchmark][INFO] - Configuring training benchmark -[2023-09-27 12:01:01,287][training][INFO] - Running training benchmark -[2023-09-27 12:01:01,287][dataset_generator][INFO] - Using text-classification task generator -[2023-09-27 12:01:01,335][pytorch][INFO] - + Setting dataset format to `torch`. -[2023-09-27 12:01:01,336][pytorch][INFO] - + Wrapping training arguments with transformers.TrainingArguments -[2023-09-27 12:01:01,337][pytorch][INFO] - + Wrapping model with transformers.Trainer -[2023-09-27 12:01:01,341][pytorch][INFO] - + Starting training -[2023-09-27 12:01:21,019][pytorch][INFO] - + Training finished successfully -[2023-09-27 12:01:21,019][training][INFO] - Saving training results -[2023-09-27 12:01:21,023][backend][INFO] - Cleaning pytorch backend -[2023-09-27 12:01:21,023][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/0/hydra_config.yaml b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/0/hydra_config.yaml deleted file mode 100644 index ce5ceabbf670712f423e7b59bda6e78c3d9bf547..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/0/hydra_config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: false - eval_mode: false - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 1500 - sequence_length: 256 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: false - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 32 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: 
text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/0/training_results.csv b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/0/training_results.csv deleted file mode 100644 index c450e3c5bab66be3ab4e62d584ff316626e5dab6..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/0/training_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -warmup.runtime(s),warmup.throughput(samples/s),training.runtime(s),training.throughput(samples/s),overall_training.runtime(s),overall_training.throughput(samples/s) -6.093268394470215,210.06788428384843,13.473761320114136,239.87362720869555,19.567030906677246,165.17580083634869 diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/1/.config/config.yaml b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/1/.config/config.yaml deleted file mode 100644 index 9481e5a5f6a940d90f8e3eeecf521edb6cba2b5f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/1/.config/config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 1500 - sequence_length: 256 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: ${is_cpu:${device}} - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 32 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - 
Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/1/.config/hydra.yaml b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/1/.config/hydra.yaml deleted file mode 100644 index 6001d63061693bae93fc74ca2816e6a1e24c3f79..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/1/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - +benchmark.training_arguments.per_device_train_batch_size: '32' - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - +benchmark.training_arguments.per_device_train_batch_size=32 - - backend.torch_dtype=float32 - job: - name: experiment - chdir: true - override_dirname: +benchmark.training_arguments.per_device_train_batch_size=32,backend.torch_dtype=float32 - id: '1' - num: 1 - config_name: bert_1gpu_training - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/1 - choices: - benchmark: training - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/1/.config/overrides.yaml b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/1/.config/overrides.yaml deleted file mode 100644 index f92a32ab90ab68f9e88427057fcb5ef3cbec936b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/1/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- +benchmark.training_arguments.per_device_train_batch_size=32 -- backend.torch_dtype=float32 diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/1/experiment.log b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/1/experiment.log deleted file mode 100644 index 352388c2f46a8a9ac9924e973f773935646a7a9b..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/1/experiment.log +++ /dev/null @@ -1,16 +0,0 @@ 
-[2023-09-27 12:01:22,703][pytorch][INFO] - + Inferred AutoModel class AutoModelForSequenceClassification for task text-classification and model_type bert -[2023-09-27 12:01:22,703][backend][INFO] - Configuring pytorch backend -[2023-09-27 12:01:22,703][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-27 12:01:22,827][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-27 12:01:22,842][pytorch][INFO] - + Loading model on device: cuda -[2023-09-27 12:01:23,406][benchmark][INFO] - Configuring training benchmark -[2023-09-27 12:01:23,407][training][INFO] - Running training benchmark -[2023-09-27 12:01:23,407][dataset_generator][INFO] - Using text-classification task generator -[2023-09-27 12:01:23,437][pytorch][INFO] - + Setting dataset format to `torch`. -[2023-09-27 12:01:23,437][pytorch][INFO] - + Wrapping training arguments with transformers.TrainingArguments -[2023-09-27 12:01:23,438][pytorch][INFO] - + Wrapping model with transformers.Trainer -[2023-09-27 12:01:23,443][pytorch][INFO] - + Starting training -[2023-09-27 12:02:09,561][pytorch][INFO] - + Training finished successfully -[2023-09-27 12:02:09,561][training][INFO] - Saving training results -[2023-09-27 12:02:09,563][backend][INFO] - Cleaning pytorch backend -[2023-09-27 12:02:09,563][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/1/hydra_config.yaml b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/1/hydra_config.yaml deleted file mode 100644 index aaf5d59736c1ac950d8013078b3a4cec11901087..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/1/hydra_config.yaml +++ /dev/null @@ -1,75 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: false - eval_mode: false - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 1500 - sequence_length: 256 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: false - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none - per_device_train_batch_size: 32 -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct 
MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/1/training_results.csv b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/1/training_results.csv deleted file mode 100644 index 90b72cf5483fe4920bce1d14a0aaf6580295612f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/1/training_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -warmup.runtime(s),warmup.throughput(samples/s),training.runtime(s),training.throughput(samples/s),overall_training.runtime(s),overall_training.throughput(samples/s) -13.32169246673584,96.08388747872313,32.68557357788086,98.88154455356337,46.00726842880249,70.24976944679096 diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/multirun.yaml b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/multirun.yaml deleted file mode 100644 index 7ec27559618fc6bbc0449f581cfb639b6b37f40f..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/bert_1gpu_training/multirun.yaml +++ /dev/null @@ -1,246 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - +benchmark.training_arguments.per_device_train_batch_size: '32' - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: [] - job: - name: experiment - chdir: true - override_dirname: '' - id: ??? - num: ??? - config_name: bert_1gpu_training - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: ??? 
- choices: - benchmark: training - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: null - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: training - _target_: optimum_benchmark.benchmarks.training.benchmark.TrainingBenchmark - warmup_steps: 40 - dataset_shapes: - dataset_size: 1500 - sequence_length: 256 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - training_arguments: - skip_memory_metrics: true - output_dir: ./trainer_output - use_cpu: ${is_cpu:${device}} - ddp_find_unused_parameters: false - do_train: true - do_eval: false - do_predict: false - report_to: none -experiment_name: bert_1gpu_training -model: bert-base-uncased -device: cuda -task: text-classification -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/0/.config/config.yaml b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/0/.config/config.yaml deleted file mode 100644 index ec0ef3c027bbeee0cbf48e5389c23fee9802c7b2..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/0/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 10 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 1 - sequence_length: 200 - num_choices: 1 - 
feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: ${infer_task:${model}} -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/0/.config/hydra.yaml b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/0/.config/hydra.yaml deleted file mode 100644 index 3e1b6ab4b97a0357b2eb42b412e5cf88ef60fb83..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/0/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,16 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - - backend.torch_dtype=float16 - job: - name: experiment - chdir: true - override_dirname: backend.torch_dtype=float16,benchmark.input_shapes.batch_size=1 - id: '0' - num: 0 - config_name: llama2_1gpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/0 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/0/.config/overrides.yaml b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/0/.config/overrides.yaml deleted file mode 100644 index b7fc5900f179157ac0449016dd8d9497ffd58db4..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/0/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- benchmark.input_shapes.batch_size=1 -- backend.torch_dtype=float16 diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/0/experiment.log b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/0/experiment.log deleted file mode 100644 index 8be8750f45fe19d2499d1634bc02c8144d226f04..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/0/experiment.log +++ /dev/null @@ -1,27 +0,0 @@ -[2023-09-27 12:02:13,594][inference][INFO] - `new_tokens` was 
set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200. -[2023-09-27 12:02:13,737][experiment][WARNING] - Multiple GPUs detected but CUDA_DEVICE_ORDER is not set. This means that code might allocate resources from the wrong GPUs even if CUDA_VISIBLE_DEVICES is set. Pytorch uses the `FASTEST_FIRST` order by default, which is not guaranteed to be the same as nvidia-smi. `CUDA_DEVICE_ORDER` will be set to `PCI_BUS_ID` to ensure that the GPUs are allocated in the same order as nvidia-smi. -[2023-09-27 12:02:15,832][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama -[2023-09-27 12:02:15,832][backend][INFO] - Configuring pytorch backend -[2023-09-27 12:02:15,833][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0] -[2023-09-27 12:02:15,962][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0] -[2023-09-27 12:02:15,977][pytorch][INFO] - + Disabling gradients -[2023-09-27 12:02:15,978][pytorch][INFO] - + Loading model on device: cuda -[2023-09-27 12:02:16,614][pytorch][INFO] - + Turning on model's eval mode -[2023-09-27 12:02:16,621][benchmark][INFO] - Configuring inference benchmark -[2023-09-27 12:02:16,621][inference][INFO] - Running inference benchmark -[2023-09-27 12:02:16,622][input_generator][INFO] - Using llama model type generator -[2023-09-27 12:02:16,642][inference][INFO] - + Preparing input for the forward pass -[2023-09-27 12:02:16,642][inference][INFO] - + Warming up the forward pass -[2023-09-27 12:02:16,970][inference][INFO] - + Tracking forward pass latency and throughput -[2023-09-27 12:02:16,971][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-27 12:02:27,175][inference][INFO] - + Forward pass latency: 3.32e-03 (s) -[2023-09-27 12:02:27,177][inference][INFO] - + Forward pass throughput: 301.00 (samples/s) -[2023-09-27 12:02:27,177][inference][INFO] - + Preparing input for the generation pass -[2023-09-27 12:02:27,178][inference][INFO] - + Warming up the generation pass -[2023-09-27 12:02:28,309][inference][INFO] - + Tracking generation latency and throughput -[2023-09-27 12:02:28,309][latency_tracker][INFO] - Tracked Pytorch devices: [0] -[2023-09-27 12:02:38,515][inference][INFO] - + Generation pass latency: 5.37e-01 (s) -[2023-09-27 12:02:38,515][inference][INFO] - + Generation pass throughput: 372.00 (tokens/s) -[2023-09-27 12:02:38,515][inference][INFO] - Saving inference results -[2023-09-27 12:02:38,522][backend][INFO] - Cleaning pytorch backend -[2023-09-27 12:02:38,522][backend][INFO] - + Deleting pretrained model diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/0/hydra_config.yaml b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/0/hydra_config.yaml deleted file mode 100644 index 1b2c1057cf26f3ea842d62d062d093cc577ef846..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/0/hydra_config.yaml +++ /dev/null @@ -1,79 +0,0 @@ -backend: - name: pytorch - version: 2.1.0+rocm5.6 - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float16 - disable_grad: true - eval_mode: true - amp_autocast: false - amp_dtype: null - 
torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 10 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 1 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - can_diffuse: false - can_generate: true - forward_kwargs: {} - generate_kwargs: - max_new_tokens: 200 - min_new_tokens: 200 - do_sample: false - use_cache: true - pad_token_id: 0 - num_beams: 1 -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: text-generation -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/0/inference_results.csv b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/0/inference_results.csv deleted file mode 100644 index 8db5af89c898ac6bc0063b3f62a613d0f17018fe..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/0/inference_results.csv +++ /dev/null @@ -1,2 +0,0 @@ -forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s) -0.00332,301.0,0.537,372.0 diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/1/.config/config.yaml b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/1/.config/config.yaml deleted file mode 100644 index 8bddc839f46a99f0877837da7c1db809c4ff56f6..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/1/.config/config.yaml +++ /dev/null @@ -1,73 +0,0 @@ -backend: - name: pytorch - version: ${pytorch_version:} - _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend - seed: 42 - inter_op_num_threads: null - intra_op_num_threads: null - initial_isolation_check: true - continous_isolation_check: true - delete_cache: false - no_weights: false - device_map: null - torch_dtype: float32 - disable_grad: ${is_inference:${benchmark.name}} - eval_mode: ${is_inference:${benchmark.name}} - amp_autocast: false - amp_dtype: null - torch_compile: false - torch_compile_config: {} - bettertransformer: false - quantization_scheme: null - quantization_config: {} - use_ddp: false - ddp_config: {} - peft_strategy: null - peft_config: {} -benchmark: - name: inference - _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark - duration: 10 - warmup_runs: 10 - memory: false - energy: false - input_shapes: - batch_size: 1 - sequence_length: 200 - num_choices: 1 - feature_size: 80 - nb_max_frames: 3000 - audio_sequence_length: 16000 - new_tokens: 200 - 
can_diffuse: ${can_diffuse:${task}} - can_generate: ${can_generate:${task}} - forward_kwargs: {} - generate_kwargs: {} -experiment_name: llama_1gpu_inference -model: fxmarty/tiny-llama-fast-tokenizer -device: cuda -task: ${infer_task:${model}} -hub_kwargs: - revision: main - cache_dir: null - force_download: false - local_files_only: false -environment: - optimum_version: 1.13.1 - transformers_version: 4.34.0.dev0 - accelerate_version: 0.23.0 - diffusers_version: null - python_version: 3.10.12 - system: Linux - cpu: ' AMD EPYC 7643 48-Core Processor' - cpu_count: 96 - cpu_ram_mb: 1082028 - gpus: - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 - - Instinct MI210 diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/1/.config/hydra.yaml b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/1/.config/hydra.yaml deleted file mode 100644 index 2d70666a6e8aef3931d3b013f37e780494d4d084..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/1/.config/hydra.yaml +++ /dev/null @@ -1,174 +0,0 @@ -hydra: - run: - dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - sweep: - dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name} - subdir: ${hydra.job.num} - launcher: - _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher - sweeper: - _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper - max_batch_size: null - params: - benchmark.input_shapes.batch_size: 1,16 - backend.torch_dtype: float16,float32 - help: - app_name: ${hydra.job.name} - header: '${hydra.help.app_name} is powered by Hydra. - - ' - footer: 'Powered by Hydra (https://hydra.cc) - - Use --hydra-help to view Hydra specific help - - ' - template: '${hydra.help.header} - - == Configuration groups == - - Compose your configuration from those groups (group=option) - - - $APP_CONFIG_GROUPS - - - == Config == - - Override anything in the config (foo.bar=value) - - - $CONFIG - - - ${hydra.help.footer} - - ' - hydra_help: - template: 'Hydra (${hydra.runtime.version}) - - See https://hydra.cc for more info. - - - == Flags == - - $FLAGS_HELP - - - == Configuration groups == - - Compose your configuration from those groups (For example, append hydra/job_logging=disabled - to command line) - - - $HYDRA_CONFIG_GROUPS - - - Use ''--cfg hydra'' to Show the Hydra config. - - ' - hydra_help: ??? 
- hydra_logging: - version: 1 - formatters: - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s' - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - root: - level: INFO - handlers: - - console - disable_existing_loggers: false - job_logging: - version: 1 - formatters: - simple: - format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s' - colorlog: - (): colorlog.ColoredFormatter - format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] - - %(message)s' - log_colors: - DEBUG: purple - INFO: green - WARNING: yellow - ERROR: red - CRITICAL: red - handlers: - console: - class: logging.StreamHandler - formatter: colorlog - stream: ext://sys.stdout - file: - class: logging.FileHandler - formatter: simple - filename: ${hydra.job.name}.log - root: - level: INFO - handlers: - - console - - file - disable_existing_loggers: false - env: {} - mode: MULTIRUN - searchpath: [] - callbacks: {} - output_subdir: .hydra - overrides: - hydra: - - hydra.mode=MULTIRUN - task: - - benchmark.input_shapes.batch_size=1 - - backend.torch_dtype=float32 - job: - name: experiment - chdir: true - override_dirname: backend.torch_dtype=float32,benchmark.input_shapes.batch_size=1 - id: '1' - num: 1 - config_name: llama2_1gpu_inference - env_set: {} - env_copy: [] - config: - override_dirname: - kv_sep: '=' - item_sep: ',' - exclude_keys: [] - runtime: - version: 1.3.2 - version_base: '1.3' - cwd: /home/user/transformers-regression - config_sources: - - path: hydra.conf - schema: pkg - provider: hydra - - path: optimum_benchmark - schema: pkg - provider: main - - path: hydra_plugins.hydra_colorlog.conf - schema: pkg - provider: hydra-colorlog - - path: /home/user/transformers-regression/configs - schema: file - provider: command-line - - path: '' - schema: structured - provider: schema - output_dir: /home/user/transformers-regression/sweeps/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/1 - choices: - benchmark: inference - backend: pytorch - hydra/env: default - hydra/callbacks: null - hydra/job_logging: colorlog - hydra/hydra_logging: colorlog - hydra/hydra_help: default - hydra/help: default - hydra/sweeper: basic - hydra/launcher: basic - hydra/output: default - verbose: false diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/1/.config/overrides.yaml b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/1/.config/overrides.yaml deleted file mode 100644 index 8b4741e9eb919dcf02db7f865f28d92490b262b0..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/1/.config/overrides.yaml +++ /dev/null @@ -1,2 +0,0 @@ -- benchmark.input_shapes.batch_size=1 -- backend.torch_dtype=float32 diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/1/experiment.log b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/1/experiment.log deleted file mode 100644 index 8031466380240f5386118336039dc73d1a2c048c..0000000000000000000000000000000000000000 --- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/1/experiment.log +++ /dev/null @@ -1,26 +0,0 @@ -[2023-09-27 12:02:39,149][inference][INFO] - `new_tokens` was 
set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200.
-[2023-09-27 12:02:40,059][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
-[2023-09-27 12:02:40,059][backend][INFO] - Configuring pytorch backend
-[2023-09-27 12:02:40,059][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0]
-[2023-09-27 12:02:40,182][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0]
-[2023-09-27 12:02:40,199][pytorch][INFO] - + Disabling gradients
-[2023-09-27 12:02:40,200][pytorch][INFO] - + Loading model on device: cuda
-[2023-09-27 12:02:40,560][pytorch][INFO] - + Turning on model's eval mode
-[2023-09-27 12:02:40,561][benchmark][INFO] - Configuring inference benchmark
-[2023-09-27 12:02:40,561][inference][INFO] - Running inference benchmark
-[2023-09-27 12:02:40,562][input_generator][INFO] - Using llama model type generator
-[2023-09-27 12:02:40,562][inference][INFO] - + Preparing input for the forward pass
-[2023-09-27 12:02:40,562][inference][INFO] - + Warming up the forward pass
-[2023-09-27 12:02:40,783][inference][INFO] - + Tracking forward pass latency and throughput
-[2023-09-27 12:02:40,783][latency_tracker][INFO] - Tracked Pytorch devices: [0]
-[2023-09-27 12:02:51,082][inference][INFO] - + Forward pass latency: 3.12e-03 (s)
-[2023-09-27 12:02:51,084][inference][INFO] - + Forward pass throughput: 321.00 (samples/s)
-[2023-09-27 12:02:51,084][inference][INFO] - + Preparing input for the generation pass
-[2023-09-27 12:02:51,084][inference][INFO] - + Warming up the generation pass
-[2023-09-27 12:02:51,711][inference][INFO] - + Tracking generation latency and throughput
-[2023-09-27 12:02:51,711][latency_tracker][INFO] - Tracked Pytorch devices: [0]
-[2023-09-27 12:03:01,932][inference][INFO] - + Generation pass latency: 6.39e-01 (s)
-[2023-09-27 12:03:01,933][inference][INFO] - + Generation pass throughput: 313.00 (tokens/s)
-[2023-09-27 12:03:01,933][inference][INFO] - Saving inference results
-[2023-09-27 12:03:01,938][backend][INFO] - Cleaning pytorch backend
-[2023-09-27 12:03:01,939][backend][INFO] - + Deleting pretrained model
diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/1/hydra_config.yaml b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/1/hydra_config.yaml
deleted file mode 100644
index 8cc3fc6127bd10e590231e5c7e6ff30f8937441d..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/1/hydra_config.yaml
+++ /dev/null
@@ -1,79 +0,0 @@
-backend:
-  name: pytorch
-  version: 2.1.0+rocm5.6
-  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
-  seed: 42
-  inter_op_num_threads: null
-  intra_op_num_threads: null
-  initial_isolation_check: true
-  continous_isolation_check: true
-  delete_cache: false
-  no_weights: false
-  device_map: null
-  torch_dtype: float32
-  disable_grad: true
-  eval_mode: true
-  amp_autocast: false
-  amp_dtype: null
-  torch_compile: false
-  torch_compile_config: {}
-  bettertransformer: false
-  quantization_scheme: null
-  quantization_config: {}
-  use_ddp: false
-  ddp_config: {}
-  peft_strategy: null
-  peft_config: {}
-benchmark:
-  name: inference
-  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
-  duration: 10
-  warmup_runs: 10
-  memory: false
-  energy: false
-  input_shapes:
-    batch_size: 1
-    sequence_length: 200
-    num_choices: 1
-    feature_size: 80
-    nb_max_frames: 3000
-    audio_sequence_length: 16000
-  new_tokens: 200
-  can_diffuse: false
-  can_generate: true
-  forward_kwargs: {}
-  generate_kwargs:
-    max_new_tokens: 200
-    min_new_tokens: 200
-    do_sample: false
-    use_cache: true
-    pad_token_id: 0
-    num_beams: 1
-experiment_name: llama_1gpu_inference
-model: fxmarty/tiny-llama-fast-tokenizer
-device: cuda
-task: text-generation
-hub_kwargs:
-  revision: main
-  cache_dir: null
-  force_download: false
-  local_files_only: false
-environment:
-  optimum_version: 1.13.1
-  transformers_version: 4.34.0.dev0
-  accelerate_version: 0.23.0
-  diffusers_version: null
-  python_version: 3.10.12
-  system: Linux
-  cpu: ' AMD EPYC 7643 48-Core Processor'
-  cpu_count: 96
-  cpu_ram_mb: 1082028
-  gpus:
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/1/inference_results.csv b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/1/inference_results.csv
deleted file mode 100644
index d94af46c1f38568e00d294b81749c6561a4d88d6..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/1/inference_results.csv
+++ /dev/null
@@ -1,2 +0,0 @@
-forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
-0.00312,321.0,0.639,313.0
diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/2/.config/config.yaml b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/2/.config/config.yaml
deleted file mode 100644
index e808137260c93f3a5e79a7488d8c77c535b5afb8..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/2/.config/config.yaml
+++ /dev/null
@@ -1,73 +0,0 @@
-backend:
-  name: pytorch
-  version: ${pytorch_version:}
-  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
-  seed: 42
-  inter_op_num_threads: null
-  intra_op_num_threads: null
-  initial_isolation_check: true
-  continous_isolation_check: true
-  delete_cache: false
-  no_weights: false
-  device_map: null
-  torch_dtype: float16
-  disable_grad: ${is_inference:${benchmark.name}}
-  eval_mode: ${is_inference:${benchmark.name}}
-  amp_autocast: false
-  amp_dtype: null
-  torch_compile: false
-  torch_compile_config: {}
-  bettertransformer: false
-  quantization_scheme: null
-  quantization_config: {}
-  use_ddp: false
-  ddp_config: {}
-  peft_strategy: null
-  peft_config: {}
-benchmark:
-  name: inference
-  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
-  duration: 10
-  warmup_runs: 10
-  memory: false
-  energy: false
-  input_shapes:
-    batch_size: 16
-    sequence_length: 200
-    num_choices: 1
-    feature_size: 80
-    nb_max_frames: 3000
-    audio_sequence_length: 16000
-  new_tokens: 200
-  can_diffuse: ${can_diffuse:${task}}
-  can_generate: ${can_generate:${task}}
-  forward_kwargs: {}
-  generate_kwargs: {}
-experiment_name: llama_1gpu_inference
-model: fxmarty/tiny-llama-fast-tokenizer
-device: cuda
-task: ${infer_task:${model}}
-hub_kwargs:
-  revision: main
-  cache_dir: null
-  force_download: false
-  local_files_only: false
-environment:
-  optimum_version: 1.13.1
-  transformers_version: 4.34.0.dev0
-  accelerate_version: 0.23.0
-  diffusers_version: null
-  python_version: 3.10.12
-  system: Linux
-  cpu: ' AMD EPYC 7643 48-Core Processor'
-  cpu_count: 96
-  cpu_ram_mb: 1082028
-  gpus:
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/2/.config/hydra.yaml b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/2/.config/hydra.yaml
deleted file mode 100644
index ea10c87ecf96622f5b043dba88776a97c1efdb29..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/2/.config/hydra.yaml
+++ /dev/null
@@ -1,174 +0,0 @@
-hydra:
-  run:
-    dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
-  sweep:
-    dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
-    subdir: ${hydra.job.num}
-  launcher:
-    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
-  sweeper:
-    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
-    max_batch_size: null
-    params:
-      benchmark.input_shapes.batch_size: 1,16
-      backend.torch_dtype: float16,float32
-  help:
-    app_name: ${hydra.job.name}
-    header: '${hydra.help.app_name} is powered by Hydra.
-
-      '
-    footer: 'Powered by Hydra (https://hydra.cc)
-
-      Use --hydra-help to view Hydra specific help
-
-      '
-    template: '${hydra.help.header}
-
-      == Configuration groups ==
-
-      Compose your configuration from those groups (group=option)
-
-
-      $APP_CONFIG_GROUPS
-
-
-      == Config ==
-
-      Override anything in the config (foo.bar=value)
-
-
-      $CONFIG
-
-
-      ${hydra.help.footer}
-
-      '
-  hydra_help:
-    template: 'Hydra (${hydra.runtime.version})
-
-      See https://hydra.cc for more info.
-
-
-      == Flags ==
-
-      $FLAGS_HELP
-
-
-      == Configuration groups ==
-
-      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
-      to command line)
-
-
-      $HYDRA_CONFIG_GROUPS
-
-
-      Use ''--cfg hydra'' to Show the Hydra config.
-
-      '
-    hydra_help: ???
-  hydra_logging:
-    version: 1
-    formatters:
-      colorlog:
-        (): colorlog.ColoredFormatter
-        format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
-    handlers:
-      console:
-        class: logging.StreamHandler
-        formatter: colorlog
-        stream: ext://sys.stdout
-    root:
-      level: INFO
-      handlers:
-      - console
-    disable_existing_loggers: false
-  job_logging:
-    version: 1
-    formatters:
-      simple:
-        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
-      colorlog:
-        (): colorlog.ColoredFormatter
-        format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
-          - %(message)s'
-        log_colors:
-          DEBUG: purple
-          INFO: green
-          WARNING: yellow
-          ERROR: red
-          CRITICAL: red
-    handlers:
-      console:
-        class: logging.StreamHandler
-        formatter: colorlog
-        stream: ext://sys.stdout
-      file:
-        class: logging.FileHandler
-        formatter: simple
-        filename: ${hydra.job.name}.log
-    root:
-      level: INFO
-      handlers:
-      - console
-      - file
-    disable_existing_loggers: false
-  env: {}
-  mode: MULTIRUN
-  searchpath: []
-  callbacks: {}
-  output_subdir: .hydra
-  overrides:
-    hydra:
-    - hydra.mode=MULTIRUN
-    task:
-    - benchmark.input_shapes.batch_size=16
-    - backend.torch_dtype=float16
-  job:
-    name: experiment
-    chdir: true
-    override_dirname: backend.torch_dtype=float16,benchmark.input_shapes.batch_size=16
-    id: '2'
-    num: 2
-    config_name: llama2_1gpu_inference
-    env_set: {}
-    env_copy: []
-    config:
-      override_dirname:
-        kv_sep: '='
-        item_sep: ','
-        exclude_keys: []
-  runtime:
-    version: 1.3.2
-    version_base: '1.3'
-    cwd: /home/user/transformers-regression
-    config_sources:
-    - path: hydra.conf
-      schema: pkg
-      provider: hydra
-    - path: optimum_benchmark
-      schema: pkg
-      provider: main
-    - path: hydra_plugins.hydra_colorlog.conf
-      schema: pkg
-      provider: hydra-colorlog
-    - path: /home/user/transformers-regression/configs
-      schema: file
-      provider: command-line
-    - path: ''
-      schema: structured
-      provider: schema
-    output_dir: /home/user/transformers-regression/sweeps/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/2
-    choices:
-      benchmark: inference
-      backend: pytorch
-      hydra/env: default
-      hydra/callbacks: null
-      hydra/job_logging: colorlog
-      hydra/hydra_logging: colorlog
-      hydra/hydra_help: default
-      hydra/help: default
-      hydra/sweeper: basic
-      hydra/launcher: basic
-      hydra/output: default
-  verbose: false
diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/2/.config/overrides.yaml b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/2/.config/overrides.yaml
deleted file mode 100644
index 67f6580c5dd56716f1c23e4e90698198799b499f..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/2/.config/overrides.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-- benchmark.input_shapes.batch_size=16
-- backend.torch_dtype=float16
diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/2/experiment.log b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/2/experiment.log
deleted file mode 100644
index ed828ab5f5a4f011fd7584fb2a3870608cb77e46..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/2/experiment.log
+++ /dev/null
@@ -1,26 +0,0 @@
-[2023-09-27 12:03:02,564][inference][INFO] - `new_tokens` was set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200.
-[2023-09-27 12:03:03,480][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
-[2023-09-27 12:03:03,480][backend][INFO] - Configuring pytorch backend
-[2023-09-27 12:03:03,481][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0]
-[2023-09-27 12:03:03,603][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0]
-[2023-09-27 12:03:03,624][pytorch][INFO] - + Disabling gradients
-[2023-09-27 12:03:03,625][pytorch][INFO] - + Loading model on device: cuda
-[2023-09-27 12:03:03,982][pytorch][INFO] - + Turning on model's eval mode
-[2023-09-27 12:03:03,982][benchmark][INFO] - Configuring inference benchmark
-[2023-09-27 12:03:03,982][inference][INFO] - Running inference benchmark
-[2023-09-27 12:03:03,983][input_generator][INFO] - Using llama model type generator
-[2023-09-27 12:03:03,983][inference][INFO] - + Preparing input for the forward pass
-[2023-09-27 12:03:03,983][inference][INFO] - + Warming up the forward pass
-[2023-09-27 12:03:04,016][inference][INFO] - + Tracking forward pass latency and throughput
-[2023-09-27 12:03:04,016][latency_tracker][INFO] - Tracked Pytorch devices: [0]
-[2023-09-27 12:03:14,407][inference][INFO] - + Forward pass latency: 6.09e-03 (s)
-[2023-09-27 12:03:14,409][inference][INFO] - + Forward pass throughput: 2630.00 (samples/s)
-[2023-09-27 12:03:14,409][inference][INFO] - + Preparing input for the generation pass
-[2023-09-27 12:03:14,409][inference][INFO] - + Warming up the generation pass
-[2023-09-27 12:03:15,902][inference][INFO] - + Tracking generation latency and throughput
-[2023-09-27 12:03:15,903][latency_tracker][INFO] - Tracked Pytorch devices: [0]
-[2023-09-27 12:03:26,584][inference][INFO] - + Generation pass latency: 8.22e-01 (s)
-[2023-09-27 12:03:26,585][inference][INFO] - + Generation pass throughput: 3890.00 (tokens/s)
-[2023-09-27 12:03:26,585][inference][INFO] - Saving inference results
-[2023-09-27 12:03:26,589][backend][INFO] - Cleaning pytorch backend
-[2023-09-27 12:03:26,589][backend][INFO] - + Deleting pretrained model
diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/2/hydra_config.yaml b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/2/hydra_config.yaml
deleted file mode 100644
index 6d5005c942d252f9a4063f3d1220104f6cf8d9f3..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/2/hydra_config.yaml
+++ /dev/null
@@ -1,79 +0,0 @@
-backend:
-  name: pytorch
-  version: 2.1.0+rocm5.6
-  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
-  seed: 42
-  inter_op_num_threads: null
-  intra_op_num_threads: null
-  initial_isolation_check: true
-  continous_isolation_check: true
-  delete_cache: false
-  no_weights: false
-  device_map: null
-  torch_dtype: float16
-  disable_grad: true
-  eval_mode: true
-  amp_autocast: false
-  amp_dtype: null
-  torch_compile: false
-  torch_compile_config: {}
-  bettertransformer: false
-  quantization_scheme: null
-  quantization_config: {}
-  use_ddp: false
-  ddp_config: {}
-  peft_strategy: null
-  peft_config: {}
-benchmark:
-  name: inference
-  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
-  duration: 10
-  warmup_runs: 10
-  memory: false
-  energy: false
-  input_shapes:
-    batch_size: 16
-    sequence_length: 200
-    num_choices: 1
-    feature_size: 80
-    nb_max_frames: 3000
-    audio_sequence_length: 16000
-  new_tokens: 200
-  can_diffuse: false
-  can_generate: true
-  forward_kwargs: {}
-  generate_kwargs:
-    max_new_tokens: 200
-    min_new_tokens: 200
-    do_sample: false
-    use_cache: true
-    pad_token_id: 0
-    num_beams: 1
-experiment_name: llama_1gpu_inference
-model: fxmarty/tiny-llama-fast-tokenizer
-device: cuda
-task: text-generation
-hub_kwargs:
-  revision: main
-  cache_dir: null
-  force_download: false
-  local_files_only: false
-environment:
-  optimum_version: 1.13.1
-  transformers_version: 4.34.0.dev0
-  accelerate_version: 0.23.0
-  diffusers_version: null
-  python_version: 3.10.12
-  system: Linux
-  cpu: ' AMD EPYC 7643 48-Core Processor'
-  cpu_count: 96
-  cpu_ram_mb: 1082028
-  gpus:
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/2/inference_results.csv b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/2/inference_results.csv
deleted file mode 100644
index dc4c95392b0723f4009d6b93fb58172176d4149c..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/2/inference_results.csv
+++ /dev/null
@@ -1,2 +0,0 @@
-forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
-0.00609,2630.0,0.822,3890.0
diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/3/.config/config.yaml b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/3/.config/config.yaml
deleted file mode 100644
index 8c1798c7fccfef2f037edd3ad9cd17c6f4f89168..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/3/.config/config.yaml
+++ /dev/null
@@ -1,73 +0,0 @@
-backend:
-  name: pytorch
-  version: ${pytorch_version:}
-  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
-  seed: 42
-  inter_op_num_threads: null
-  intra_op_num_threads: null
-  initial_isolation_check: true
-  continous_isolation_check: true
-  delete_cache: false
-  no_weights: false
-  device_map: null
-  torch_dtype: float32
-  disable_grad: ${is_inference:${benchmark.name}}
-  eval_mode: ${is_inference:${benchmark.name}}
-  amp_autocast: false
-  amp_dtype: null
-  torch_compile: false
-  torch_compile_config: {}
-  bettertransformer: false
-  quantization_scheme: null
-  quantization_config: {}
-  use_ddp: false
-  ddp_config: {}
-  peft_strategy: null
-  peft_config: {}
-benchmark:
-  name: inference
-  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
-  duration: 10
-  warmup_runs: 10
-  memory: false
-  energy: false
-  input_shapes:
-    batch_size: 16
-    sequence_length: 200
-    num_choices: 1
-    feature_size: 80
-    nb_max_frames: 3000
-    audio_sequence_length: 16000
-  new_tokens: 200
-  can_diffuse: ${can_diffuse:${task}}
-  can_generate: ${can_generate:${task}}
-  forward_kwargs: {}
-  generate_kwargs: {}
-experiment_name: llama_1gpu_inference
-model: fxmarty/tiny-llama-fast-tokenizer
-device: cuda
-task: ${infer_task:${model}}
-hub_kwargs:
-  revision: main
-  cache_dir: null
-  force_download: false
-  local_files_only: false
-environment:
-  optimum_version: 1.13.1
-  transformers_version: 4.34.0.dev0
-  accelerate_version: 0.23.0
-  diffusers_version: null
-  python_version: 3.10.12
-  system: Linux
-  cpu: ' AMD EPYC 7643 48-Core Processor'
-  cpu_count: 96
-  cpu_ram_mb: 1082028
-  gpus:
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/3/.config/hydra.yaml b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/3/.config/hydra.yaml
deleted file mode 100644
index ad9df65c97c4081b66b1832a6d7aa996aa541d39..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/3/.config/hydra.yaml
+++ /dev/null
@@ -1,174 +0,0 @@
-hydra:
-  run:
-    dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
-  sweep:
-    dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
-    subdir: ${hydra.job.num}
-  launcher:
-    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
-  sweeper:
-    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
-    max_batch_size: null
-    params:
-      benchmark.input_shapes.batch_size: 1,16
-      backend.torch_dtype: float16,float32
-  help:
-    app_name: ${hydra.job.name}
-    header: '${hydra.help.app_name} is powered by Hydra.
-
-      '
-    footer: 'Powered by Hydra (https://hydra.cc)
-
-      Use --hydra-help to view Hydra specific help
-
-      '
-    template: '${hydra.help.header}
-
-      == Configuration groups ==
-
-      Compose your configuration from those groups (group=option)
-
-
-      $APP_CONFIG_GROUPS
-
-
-      == Config ==
-
-      Override anything in the config (foo.bar=value)
-
-
-      $CONFIG
-
-
-      ${hydra.help.footer}
-
-      '
-  hydra_help:
-    template: 'Hydra (${hydra.runtime.version})
-
-      See https://hydra.cc for more info.
-
-
-      == Flags ==
-
-      $FLAGS_HELP
-
-
-      == Configuration groups ==
-
-      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
-      to command line)
-
-
-      $HYDRA_CONFIG_GROUPS
-
-
-      Use ''--cfg hydra'' to Show the Hydra config.
-
-      '
-    hydra_help: ???
-  hydra_logging:
-    version: 1
-    formatters:
-      colorlog:
-        (): colorlog.ColoredFormatter
-        format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
-    handlers:
-      console:
-        class: logging.StreamHandler
-        formatter: colorlog
-        stream: ext://sys.stdout
-    root:
-      level: INFO
-      handlers:
-      - console
-    disable_existing_loggers: false
-  job_logging:
-    version: 1
-    formatters:
-      simple:
-        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
-      colorlog:
-        (): colorlog.ColoredFormatter
-        format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
-          - %(message)s'
-        log_colors:
-          DEBUG: purple
-          INFO: green
-          WARNING: yellow
-          ERROR: red
-          CRITICAL: red
-    handlers:
-      console:
-        class: logging.StreamHandler
-        formatter: colorlog
-        stream: ext://sys.stdout
-      file:
-        class: logging.FileHandler
-        formatter: simple
-        filename: ${hydra.job.name}.log
-    root:
-      level: INFO
-      handlers:
-      - console
-      - file
-    disable_existing_loggers: false
-  env: {}
-  mode: MULTIRUN
-  searchpath: []
-  callbacks: {}
-  output_subdir: .hydra
-  overrides:
-    hydra:
-    - hydra.mode=MULTIRUN
-    task:
-    - benchmark.input_shapes.batch_size=16
-    - backend.torch_dtype=float32
-  job:
-    name: experiment
-    chdir: true
-    override_dirname: backend.torch_dtype=float32,benchmark.input_shapes.batch_size=16
-    id: '3'
-    num: 3
-    config_name: llama2_1gpu_inference
-    env_set: {}
-    env_copy: []
-    config:
-      override_dirname:
-        kv_sep: '='
-        item_sep: ','
-        exclude_keys: []
-  runtime:
-    version: 1.3.2
-    version_base: '1.3'
-    cwd: /home/user/transformers-regression
-    config_sources:
-    - path: hydra.conf
-      schema: pkg
-      provider: hydra
-    - path: optimum_benchmark
-      schema: pkg
-      provider: main
-    - path: hydra_plugins.hydra_colorlog.conf
-      schema: pkg
-      provider: hydra-colorlog
-    - path: /home/user/transformers-regression/configs
-      schema: file
-      provider: command-line
-    - path: ''
-      schema: structured
-      provider: schema
-    output_dir: /home/user/transformers-regression/sweeps/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/3
-    choices:
-      benchmark: inference
-      backend: pytorch
-      hydra/env: default
-      hydra/callbacks: null
-      hydra/job_logging: colorlog
-      hydra/hydra_logging: colorlog
-      hydra/hydra_help: default
-      hydra/help: default
-      hydra/sweeper: basic
-      hydra/launcher: basic
-      hydra/output: default
-  verbose: false
diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/3/.config/overrides.yaml b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/3/.config/overrides.yaml
deleted file mode 100644
index 9d69bb0ed87b5cb93e4c4e5d5b820de170746b79..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/3/.config/overrides.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-- benchmark.input_shapes.batch_size=16
-- backend.torch_dtype=float32
diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/3/experiment.log b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/3/experiment.log
deleted file mode 100644
index 8914815a0582330ed8e613b6ac08a0e03e6f1632..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/3/experiment.log
+++ /dev/null
@@ -1,26 +0,0 @@
-[2023-09-27 12:03:27,204][inference][INFO] - `new_tokens` was set to 200. `max_new_tokens` and `min_new_tokens` will be set to 200.
-[2023-09-27 12:03:28,110][pytorch][INFO] - + Inferred AutoModel class AutoModelForCausalLM for task text-generation and model_type llama
-[2023-09-27 12:03:28,110][backend][INFO] - Configuring pytorch backend
-[2023-09-27 12:03:28,110][backend][INFO] - + Checking initial device(s) isolation of CUDA device(s): [0]
-[2023-09-27 12:03:28,228][backend][INFO] - + Checking continuous device(s) isolation of CUDA device(s): [0]
-[2023-09-27 12:03:28,248][pytorch][INFO] - + Disabling gradients
-[2023-09-27 12:03:28,249][pytorch][INFO] - + Loading model on device: cuda
-[2023-09-27 12:03:28,622][pytorch][INFO] - + Turning on model's eval mode
-[2023-09-27 12:03:28,622][benchmark][INFO] - Configuring inference benchmark
-[2023-09-27 12:03:28,623][inference][INFO] - Running inference benchmark
-[2023-09-27 12:03:28,623][input_generator][INFO] - Using llama model type generator
-[2023-09-27 12:03:28,624][inference][INFO] - + Preparing input for the forward pass
-[2023-09-27 12:03:28,660][inference][INFO] - + Warming up the forward pass
-[2023-09-27 12:03:28,686][inference][INFO] - + Tracking forward pass latency and throughput
-[2023-09-27 12:03:28,686][latency_tracker][INFO] - Tracked Pytorch devices: [0]
-[2023-09-27 12:03:38,803][inference][INFO] - + Forward pass latency: 4.87e-03 (s)
-[2023-09-27 12:03:38,804][inference][INFO] - + Forward pass throughput: 3290.00 (samples/s)
-[2023-09-27 12:03:38,805][inference][INFO] - + Preparing input for the generation pass
-[2023-09-27 12:03:38,805][inference][INFO] - + Warming up the generation pass
-[2023-09-27 12:03:39,433][inference][INFO] - + Tracking generation latency and throughput
-[2023-09-27 12:03:39,433][latency_tracker][INFO] - Tracked Pytorch devices: [0]
-[2023-09-27 12:03:50,151][inference][INFO] - + Generation pass latency: 7.65e-01 (s)
-[2023-09-27 12:03:50,151][inference][INFO] - + Generation pass throughput: 4180.00 (tokens/s)
-[2023-09-27 12:03:50,151][inference][INFO] - Saving inference results
-[2023-09-27 12:03:50,156][backend][INFO] - Cleaning pytorch backend
-[2023-09-27 12:03:50,156][backend][INFO] - + Deleting pretrained model
diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/3/hydra_config.yaml b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/3/hydra_config.yaml
deleted file mode 100644
index 53ab39b8c17b78cbdb9aeaa4950e44666a186692..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/3/hydra_config.yaml
+++ /dev/null
@@ -1,79 +0,0 @@
-backend:
-  name: pytorch
-  version: 2.1.0+rocm5.6
-  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
-  seed: 42
-  inter_op_num_threads: null
-  intra_op_num_threads: null
-  initial_isolation_check: true
-  continous_isolation_check: true
-  delete_cache: false
-  no_weights: false
-  device_map: null
-  torch_dtype: float32
-  disable_grad: true
-  eval_mode: true
-  amp_autocast: false
-  amp_dtype: null
-  torch_compile: false
-  torch_compile_config: {}
-  bettertransformer: false
-  quantization_scheme: null
-  quantization_config: {}
-  use_ddp: false
-  ddp_config: {}
-  peft_strategy: null
-  peft_config: {}
-benchmark:
-  name: inference
-  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
-  duration: 10
-  warmup_runs: 10
-  memory: false
-  energy: false
-  input_shapes:
-    batch_size: 16
-    sequence_length: 200
-    num_choices: 1
-    feature_size: 80
-    nb_max_frames: 3000
-    audio_sequence_length: 16000
-  new_tokens: 200
-  can_diffuse: false
-  can_generate: true
-  forward_kwargs: {}
-  generate_kwargs:
-    max_new_tokens: 200
-    min_new_tokens: 200
-    do_sample: false
-    use_cache: true
-    pad_token_id: 0
-    num_beams: 1
-experiment_name: llama_1gpu_inference
-model: fxmarty/tiny-llama-fast-tokenizer
-device: cuda
-task: text-generation
-hub_kwargs:
-  revision: main
-  cache_dir: null
-  force_download: false
-  local_files_only: false
-environment:
-  optimum_version: 1.13.1
-  transformers_version: 4.34.0.dev0
-  accelerate_version: 0.23.0
-  diffusers_version: null
-  python_version: 3.10.12
-  system: Linux
-  cpu: ' AMD EPYC 7643 48-Core Processor'
-  cpu_count: 96
-  cpu_ram_mb: 1082028
-  gpus:
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/3/inference_results.csv b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/3/inference_results.csv
deleted file mode 100644
index d92bc38c1f9fa844f338fc2486245137b8de978d..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/3/inference_results.csv
+++ /dev/null
@@ -1,2 +0,0 @@
-forward.latency(s),forward.throughput(samples/s),generate.latency(s),generate.throughput(tokens/s)
-0.00487,3290.0,0.765,4180.0
diff --git a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/multirun.yaml b/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/multirun.yaml
deleted file mode 100644
index 7c81a1546dd4da5a94a657b8233d32ea068d43dc..0000000000000000000000000000000000000000
--- a/raw_results/2023-09-27_10:28:40_946bac798caefada3f5f1c9fecdcfd587ed24ac7/llama_1gpu_inference/multirun.yaml
+++ /dev/null
@@ -1,245 +0,0 @@
-hydra:
-  run:
-    dir: runs/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
-  sweep:
-    dir: sweeps/${oc.env:COMMIT_DATE_GMT}_${oc.env:COMMIT_SHA}/${experiment_name}
-    subdir: ${hydra.job.num}
-  launcher:
-    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
-  sweeper:
-    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
-    max_batch_size: null
-    params:
-      benchmark.input_shapes.batch_size: 1,16
-      backend.torch_dtype: float16,float32
-  help:
-    app_name: ${hydra.job.name}
-    header: '${hydra.help.app_name} is powered by Hydra.
-
-      '
-    footer: 'Powered by Hydra (https://hydra.cc)
-
-      Use --hydra-help to view Hydra specific help
-
-      '
-    template: '${hydra.help.header}
-
-      == Configuration groups ==
-
-      Compose your configuration from those groups (group=option)
-
-
-      $APP_CONFIG_GROUPS
-
-
-      == Config ==
-
-      Override anything in the config (foo.bar=value)
-
-
-      $CONFIG
-
-
-      ${hydra.help.footer}
-
-      '
-  hydra_help:
-    template: 'Hydra (${hydra.runtime.version})
-
-      See https://hydra.cc for more info.
-
-
-      == Flags ==
-
-      $FLAGS_HELP
-
-
-      == Configuration groups ==
-
-      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
-      to command line)
-
-
-      $HYDRA_CONFIG_GROUPS
-
-
-      Use ''--cfg hydra'' to Show the Hydra config.
-
-      '
-    hydra_help: ???
-  hydra_logging:
-    version: 1
-    formatters:
-      colorlog:
-        (): colorlog.ColoredFormatter
-        format: '[%(cyan)s%(asctime)s%(reset)s][%(purple)sHYDRA%(reset)s] %(message)s'
-    handlers:
-      console:
-        class: logging.StreamHandler
-        formatter: colorlog
-        stream: ext://sys.stdout
-    root:
-      level: INFO
-      handlers:
-      - console
-    disable_existing_loggers: false
-  job_logging:
-    version: 1
-    formatters:
-      simple:
-        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
-      colorlog:
-        (): colorlog.ColoredFormatter
-        format: '[%(cyan)s%(asctime)s%(reset)s][%(blue)s%(name)s%(reset)s][%(log_color)s%(levelname)s%(reset)s]
-          - %(message)s'
-        log_colors:
-          DEBUG: purple
-          INFO: green
-          WARNING: yellow
-          ERROR: red
-          CRITICAL: red
-    handlers:
-      console:
-        class: logging.StreamHandler
-        formatter: colorlog
-        stream: ext://sys.stdout
-      file:
-        class: logging.FileHandler
-        formatter: simple
-        filename: ${hydra.job.name}.log
-    root:
-      level: INFO
-      handlers:
-      - console
-      - file
-    disable_existing_loggers: false
-  env: {}
-  mode: MULTIRUN
-  searchpath: []
-  callbacks: {}
-  output_subdir: .hydra
-  overrides:
-    hydra:
-    - hydra.mode=MULTIRUN
-    task: []
-  job:
-    name: experiment
-    chdir: true
-    override_dirname: ''
-    id: ???
-    num: ???
-    config_name: llama2_1gpu_inference
-    env_set: {}
-    env_copy: []
-    config:
-      override_dirname:
-        kv_sep: '='
-        item_sep: ','
-        exclude_keys: []
-  runtime:
-    version: 1.3.2
-    version_base: '1.3'
-    cwd: /home/user/transformers-regression
-    config_sources:
-    - path: hydra.conf
-      schema: pkg
-      provider: hydra
-    - path: optimum_benchmark
-      schema: pkg
-      provider: main
-    - path: hydra_plugins.hydra_colorlog.conf
-      schema: pkg
-      provider: hydra-colorlog
-    - path: /home/user/transformers-regression/configs
-      schema: file
-      provider: command-line
-    - path: ''
-      schema: structured
-      provider: schema
-    output_dir: ???
-    choices:
-      benchmark: inference
-      backend: pytorch
-      hydra/env: default
-      hydra/callbacks: null
-      hydra/job_logging: colorlog
-      hydra/hydra_logging: colorlog
-      hydra/hydra_help: default
-      hydra/help: default
-      hydra/sweeper: basic
-      hydra/launcher: basic
-      hydra/output: default
-  verbose: false
-backend:
-  name: pytorch
-  version: ${pytorch_version:}
-  _target_: optimum_benchmark.backends.pytorch.backend.PyTorchBackend
-  seed: 42
-  inter_op_num_threads: null
-  intra_op_num_threads: null
-  initial_isolation_check: true
-  continous_isolation_check: true
-  delete_cache: false
-  no_weights: false
-  device_map: null
-  torch_dtype: null
-  disable_grad: ${is_inference:${benchmark.name}}
-  eval_mode: ${is_inference:${benchmark.name}}
-  amp_autocast: false
-  amp_dtype: null
-  torch_compile: false
-  torch_compile_config: {}
-  bettertransformer: false
-  quantization_scheme: null
-  quantization_config: {}
-  use_ddp: false
-  ddp_config: {}
-  peft_strategy: null
-  peft_config: {}
-benchmark:
-  name: inference
-  _target_: optimum_benchmark.benchmarks.inference.benchmark.InferenceBenchmark
-  duration: 10
-  warmup_runs: 10
-  memory: false
-  energy: false
-  input_shapes:
-    batch_size: 2
-    sequence_length: 200
-    num_choices: 1
-    feature_size: 80
-    nb_max_frames: 3000
-    audio_sequence_length: 16000
-  new_tokens: 200
-  can_diffuse: ${can_diffuse:${task}}
-  can_generate: ${can_generate:${task}}
-  forward_kwargs: {}
-  generate_kwargs: {}
-experiment_name: llama_1gpu_inference
-model: fxmarty/tiny-llama-fast-tokenizer
-device: cuda
-task: ${infer_task:${model}}
-hub_kwargs:
-  revision: main
-  cache_dir: null
-  force_download: false
-  local_files_only: false
-environment:
-  optimum_version: 1.13.1
-  transformers_version: 4.34.0.dev0
-  accelerate_version: 0.23.0
-  diffusers_version: null
-  python_version: 3.10.12
-  system: Linux
-  cpu: ' AMD EPYC 7643 48-Core Processor'
-  cpu_count: 96
-  cpu_ram_mb: 1082028
-  gpus:
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210
-  - Instinct MI210