Commit 0f9db6d
Parent(s): 9a78645

test

Files changed:
- app.py (+138, -85)
- benchmark.log (+41, -0)
- config_store.py (+4, -0)
- packages.txt (+0, -0)
- pyproject.toml (+0, -3)
app.py
CHANGED
@@ -3,12 +3,14 @@ import time
 from huggingface_hub import create_repo, whoami
 import gradio as gr
 from config_store import (
+    get_process_config,
     get_inference_config,
     get_onnxruntime_config,
     get_openvino_config,
     get_pytorch_config,
-    get_process_config,
+    get_ipex_config,
 )
+from optimum_benchmark.launchers.base import Launcher  # noqa
 from optimum_benchmark.backends.openvino.utils import TASKS_TO_OVMODEL
 from optimum_benchmark.backends.transformers_utils import TASKS_TO_MODEL_LOADERS
 from optimum_benchmark.backends.onnxruntime.utils import TASKS_TO_ORTMODELS
@@ -25,15 +27,17 @@ from optimum_benchmark import (
 )
 from optimum_benchmark.logging_utils import setup_logging
 
-os.environ["LOG_TO_FILE"] = "0"
-os.environ["LOG_LEVEL"] = "INFO"
-setup_logging(level="INFO", prefix="MAIN-PROCESS")
 
 DEVICE = "cpu"
-
-
-
-
+LAUNCHER = "process"
+SCENARIO = "inference"
+BACKENDS = ["onnxruntime", "openvino", "pytorch", "ipex"]
+MODELS = [
+    "hf-internal-testing/tiny-random-bert",
+    "google-bert/bert-base-uncased",
+    "openai-community/gpt2",
+]
+TASKS = (
     set(TASKS_TO_OVMODEL.keys())
     & set(TASKS_TO_ORTMODELS.keys())
    & set(TASKS_TO_IPEXMODEL.keys())
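The new TASKS constant is the intersection of the task sets that every backend supports, which is what makes "feature-extraction" a safe default for the task dropdown later in the diff. A minimal sketch of the idea, with hypothetical placeholder maps standing in for the real TASKS_TO_* dictionaries imported from optimum_benchmark:

# Hypothetical stand-ins for TASKS_TO_OVMODEL, TASKS_TO_ORTMODELS, TASKS_TO_IPEXMODEL;
# the real maps go from task name to the model class that serves it.
TASKS_TO_OVMODEL = {"feature-extraction": "OVModel", "text-generation": "OVModelForCausalLM"}
TASKS_TO_ORTMODELS = {"feature-extraction": "ORTModel", "text-classification": "ORTModelForSequenceClassification"}
TASKS_TO_IPEXMODEL = {"feature-extraction": "IPEXModel", "text-generation": "IPEXModelForCausalLM"}

# Intersecting the key sets keeps only the tasks every backend can run.
TASKS = (
    set(TASKS_TO_OVMODEL)
    & set(TASKS_TO_ORTMODELS)
    & set(TASKS_TO_IPEXMODEL)
)
print(sorted(TASKS))  # ['feature-extraction']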
@@ -67,14 +71,16 @@ def run_benchmark(kwargs, oauth_token: gr.OAuthToken):
             model = value
         elif key.label == "task":
             task = value
+        elif key.label == "backends":
+            backends = value
         elif "." in key.label:
             backend, argument = key.label.split(".")
             configs[backend][argument] = value
         else:
             continue
 
-
-
+    configs["process"] = ProcessConfig(**configs.pop("process"))
+    configs["inference"] = InferenceConfig(**configs.pop("inference"))
 
     configs["onnxruntime"] = ORTConfig(
         task=task,
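For context on the loop above: button.click passes a set of components, so run_benchmark receives kwargs mapping each Gradio component to its submitted value, and components are routed by their label, with "backend.argument" labels split into per-backend dicts that feed ORTConfig and the other backend configs. A self-contained sketch of that routing, using a stub class in place of real Gradio components (the "pytorch.torch_dtype" label is a hypothetical example):

from collections import defaultdict

class FakeComponent:
    """Stub standing in for a Gradio component; only the label matters here."""
    def __init__(self, label):
        self.label = label

def route_kwargs(kwargs):
    configs = defaultdict(dict)
    model = task = backends = None
    for key, value in kwargs.items():
        if key.label == "model":
            model = value
        elif key.label == "task":
            task = value
        elif key.label == "backends":
            backends = value
        elif "." in key.label:
            # "pytorch.torch_dtype" -> configs["pytorch"]["torch_dtype"]
            backend, argument = key.label.split(".")
            configs[backend][argument] = value
    return model, task, backends, configs

kwargs = {
    FakeComponent("model"): "openai-community/gpt2",
    FakeComponent("task"): "feature-extraction",
    FakeComponent("backends"): ["pytorch"],
    FakeComponent("pytorch.torch_dtype"): "float32",
}
print(route_kwargs(kwargs))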
@@ -101,17 +107,31 @@ def run_benchmark(kwargs, oauth_token: gr.OAuthToken):
         **configs["ipex"],
     )
 
-    for
-
-
-
+    html_output = f"<h3>Running benchmark for model {model} on task {task} with backends {backends}</h3>"
+
+    yield html_output
+
+    timestamp = time.strftime("%Y-%m-%d-%H-%M-%S")
+
+    for backend in backends:
+        benchmark_name = f"{timestamp}/{backend}"
         benchmark_config = BenchmarkConfig(
             name=benchmark_name,
-            launcher=process_config,
-            scenario=inference_config,
             backend=configs[backend],
+            launcher=configs[LAUNCHER],
+            scenario=configs[SCENARIO],
+        )
+        benchmark_config.push_to_hub(
+            repo_id=f"{username}/benchmarks",
+            subfolder=benchmark_name,
+            token=oauth_token.token,
+        )
+        benchmark_report = Benchmark.launch(benchmark_config)
+        benchmark_report.push_to_hub(
+            repo_id=f"{username}/benchmarks",
+            subfolder=benchmark_name,
+            token=oauth_token.token,
         )
-        benchmark_report = Benchmark.run(benchmark_config)
         benchmark = Benchmark(config=benchmark_config, report=benchmark_report)
         benchmark.push_to_hub(
             repo_id=f"{username}/benchmarks",
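run_benchmark is now a generator: each yield pushes the accumulated HTML to the output component while the function keeps running, so users see one status line per backend instead of waiting for the whole loop to finish. Pushing benchmark_config before Benchmark.launch and the report after it also means a crashed run still leaves its config under {timestamp}/{backend} in the dataset, though the diff does not state that motivation. A minimal, self-contained sketch of the streaming pattern (names and the sleep are illustrative stand-ins):

import time
import gradio as gr

def run_fake_benchmark(model):
    # A generator event handler: every yield replaces the HTML component's
    # value immediately, so progress streams to the browser.
    html = f"<h3>Running benchmark for {model}</h3>"
    yield html
    for backend in ["onnxruntime", "openvino", "pytorch", "ipex"]:
        time.sleep(1)  # stand-in for Benchmark.launch(benchmark_config)
        html += f"<br>finished {backend}"
        yield html

with gr.Blocks() as demo:
    model = gr.Textbox(label="model", value="openai-community/gpt2")
    output = gr.HTML()
    gr.Button("Run").click(fn=run_fake_benchmark, inputs=model, outputs=output)

if __name__ == "__main__":
    demo.launch()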
@@ -119,79 +139,112 @@ def run_benchmark(kwargs, oauth_token: gr.OAuthToken):
             token=oauth_token.token,
         )
 
-
+        html_output += f"<br>📊 Benchmark report for {backend} backend in the folder {benchmark_name} of your benchmarks dataset"
 
+        yield html_output
 
-with gr.Blocks() as demo:
-    # add login button
-    gr.LoginButton(min_width=250)
 
-
-    gr.
-
-
+def build_demo():
+    with gr.Blocks() as demo:
+        # add login button
+        gr.LoginButton(min_width=250)
 
-
-
-
-
-    gr.HTML(
-        "<h3 style='text-align: center'>"
-        "Zero code Gradio interface of "
-        "<a href='https://github.com/huggingface/optimum-benchmark.git'>"
-        "Optimum-Benchmark"
-        "</a>"
-        "<br>"
-        "</h3>"
-    )
+        # add image
+        gr.Markdown(
+            """<img src="https://huggingface.co/spaces/optimum/optimum-benchmark-ui/resolve/main/huggy_bench.png" style="display: block; margin-left: auto; margin-right: auto; width: 30%;">"""
+        )
 
-
-
-
-
-
-
-
-
-
-
-
-
+        # title text
+        gr.Markdown(
+            "<h1 style='text-align: center'>🤗 Optimum-Benchmark Interface 🏋️</h1>"
+        )
+
+        # explanation text
+        gr.HTML(
+            "<h3 style='text-align: center'>"
+            "Zero code Gradio interface of "
+            "<a href='https://github.com/huggingface/optimum-benchmark.git'>"
+            "Optimum-Benchmark"
+            "</a>"
+            "<br>"
+            "</h3>"
+        )
+
+        model = gr.Dropdown(
+            label="model",
+            choices=MODELS,
+            value=MODELS[0],
+            info="Model to run the benchmark on.",
+        )
+        task = gr.Dropdown(
+            label="task",
+            choices=TASKS,
+            value="feature-extraction",
+            info="Task to run the benchmark on.",
+        )
+        backends = gr.CheckboxGroup(
+            interactive=True,
+            label="backends",
+            choices=BACKENDS,
+            value=BACKENDS,
+            info="Backends to run the benchmark on.",
+        )
+
+        with gr.Row():
+            with gr.Accordion(label="Process Config", open=False, visible=True):
+                process_config = get_process_config()
+
+        with gr.Row() as backend_configs:
+            with gr.Accordion(label="OnnxRuntime Config", open=False, visible=True):
+                onnxruntime_config = get_onnxruntime_config()
+            with gr.Accordion(label="OpenVINO Config", open=False, visible=True):
+                openvino_config = get_openvino_config()
+            with gr.Accordion(label="PyTorch Config", open=False, visible=True):
+                pytorch_config = get_pytorch_config()
+            with gr.Accordion(label="IPEX Config", open=False, visible=True):
+                ipex_config = get_ipex_config()
+
+        backends.change(
+            inputs=backends,
+            outputs=backend_configs.children,
+            fn=lambda values: [
+                gr.update(visible=value in values) for value in BACKENDS
+            ],
+        )
+
+        with gr.Row():
+            with gr.Accordion(label="Scenario Config", open=False, visible=True):
+                inference_config = get_inference_config()
+
+        button = gr.Button(value="Run Benchmark", variant="primary")
+
+        with gr.Row():
+            html_output = gr.HTML(label="Output", value="")
+
+        button.click(
+            fn=run_benchmark,
+            inputs={
+                task,
+                model,
+                backends,
+                *process_config.values(),
+                *inference_config.values(),
+                *onnxruntime_config.values(),
+                *openvino_config.values(),
+                *pytorch_config.values(),
+                *ipex_config.values(),
+            },
+            outputs=[html_output],
+            concurrency_limit=1,
+        )
+
+        return demo
 
-    with gr.Row():
-        with gr.Accordion(label="Process Config", open=False, visible=True):
-            process_config = get_process_config()
-
-    with gr.Row():
-        with gr.Accordion(label="PyTorch Config", open=True, visible=True):
-            pytorch_config = get_pytorch_config()
-        with gr.Accordion(label="OpenVINO Config", open=True, visible=True):
-            openvino_config = get_openvino_config()
-        with gr.Accordion(label="OnnxRuntime Config", open=True, visible=True):
-            onnxruntime_config = get_onnxruntime_config()
-
-    with gr.Row():
-        with gr.Accordion(label="Scenario Config", open=False, visible=True):
-            inference_config = get_inference_config()
-
-    button = gr.Button(value="Run Benchmark", variant="primary")
-
-    html_output = gr.HTML()
-
-    button.click(
-        fn=run_benchmark,
-        inputs={
-            task,
-            model,
-            *process_config.values(),
-            *inference_config.values(),
-            *onnxruntime_config.values(),
-            *openvino_config.values(),
-            *pytorch_config.values(),
-        },
-        outputs=[html_output],
-        concurrency_limit=1,
-    )
 
+if __name__ == "__main__":
+    os.environ["LOG_TO_FILE"] = "0"
+    os.environ["LOG_LEVEL"] = "INFO"
+    setup_logging(level="INFO", prefix="MAIN-PROCESS")
 
-demo
+    demo = build_demo()
+    demo.queue(max_size=10).launch()
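One detail worth pausing on in build_demo: backend_configs.children lists the four accordions in the same order as BACKENDS, and the change handler returns one gr.update per backend, so unchecking a backend hides its accordion. A standalone sketch of the same pattern, with a placeholder textbox inside each accordion (the "example_arg" labels are hypothetical):

import gradio as gr

BACKENDS = ["onnxruntime", "openvino", "pytorch", "ipex"]

with gr.Blocks() as demo:
    backends = gr.CheckboxGroup(label="backends", choices=BACKENDS, value=BACKENDS)
    with gr.Row() as backend_configs:
        for backend in BACKENDS:
            # Accordions are created in BACKENDS order, so backend_configs.children
            # lines up index-for-index with the gr.update list below.
            with gr.Accordion(label=f"{backend} config", open=False):
                gr.Textbox(label=f"{backend}.example_arg")
    backends.change(
        inputs=backends,
        outputs=backend_configs.children,
        fn=lambda values: [gr.update(visible=b in values) for b in BACKENDS],
    )

if __name__ == "__main__":
    demo.launch()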
benchmark.log
ADDED
@@ -0,0 +1,41 @@
+[ISOLATED-PROCESS][2024-09-25 18:53:27,439][pytorch][INFO] - Allocating pytorch backend
+[ISOLATED-PROCESS][2024-09-25 18:53:27,439][pytorch][INFO] - + Seeding backend with 42
+[ISOLATED-PROCESS][2024-09-25 18:53:27,440][pytorch][INFO] - + Benchmarking a Transformers model
+[ISOLATED-PROCESS][2024-09-25 18:53:28,549][inference][INFO] - Allocating inference scenario
+[ISOLATED-PROCESS][2024-09-25 18:53:28,549][inference][INFO] - + Creating input generator
+[ISOLATED-PROCESS][2024-09-25 18:53:28,550][inference][INFO] - + Generating Inference inputs
+[ISOLATED-PROCESS][2024-09-25 18:53:28,550][inference][INFO] - + Initializing Inference report
+[ISOLATED-PROCESS][2024-09-25 18:53:28,551][inference][INFO] - + Preparing input shapes for Inference
+[ISOLATED-PROCESS][2024-09-25 18:53:28,551][inference][INFO] - + Running model loading tracking
+[ISOLATED-PROCESS][2024-09-25 18:53:28,551][latency][INFO] - + Tracking latency using CPU performance counter
+[ISOLATED-PROCESS][2024-09-25 18:53:28,551][inference][INFO] - + Loading model for Inference
+[ISOLATED-PROCESS][2024-09-25 18:53:28,552][pytorch][INFO] - + Creating backend temporary directory
+[ISOLATED-PROCESS][2024-09-25 18:53:28,553][pytorch][INFO] - + Loading model with pretrained weights
+[ISOLATED-PROCESS][2024-09-25 18:53:28,554][pytorch][INFO] - + Loading Transformers model
+[ISOLATED-PROCESS][2024-09-25 18:53:28,881][pytorch][INFO] - + Enabling eval mode
+[ISOLATED-PROCESS][2024-09-25 18:53:28,883][pytorch][INFO] - + Cleaning up backend temporary directory
+[ISOLATED-PROCESS][2024-09-25 18:53:28,884][inference][INFO] - + Preparing inputs for Inference
+[ISOLATED-PROCESS][2024-09-25 18:53:28,885][inference][INFO] - + Warming up backend for Inference
+[ISOLATED-PROCESS][2024-09-25 18:53:29,252][inference][INFO] - + Running Inference latency tracking
+[ISOLATED-PROCESS][2024-09-25 18:53:29,252][latency][INFO] - + Tracking latency using CPU performance counter
+[ISOLATED-PROCESS][2024-09-25 18:53:39,282][latency][INFO] - + load latency:
+[ISOLATED-PROCESS][2024-09-25 18:53:39,282][latency][INFO] - - count: 1
+[ISOLATED-PROCESS][2024-09-25 18:53:39,283][latency][INFO] - - total: 0.331631 s
+[ISOLATED-PROCESS][2024-09-25 18:53:39,283][latency][INFO] - - mean: 0.331631 s
+[ISOLATED-PROCESS][2024-09-25 18:53:39,283][latency][INFO] - - stdev: 0.000000 s (0.00%)
+[ISOLATED-PROCESS][2024-09-25 18:53:39,284][latency][INFO] - - p50: 0.331631 s
+[ISOLATED-PROCESS][2024-09-25 18:53:39,284][latency][INFO] - - p90: 0.331631 s
+[ISOLATED-PROCESS][2024-09-25 18:53:39,284][latency][INFO] - - p95: 0.331631 s
+[ISOLATED-PROCESS][2024-09-25 18:53:39,284][latency][INFO] - - p99: 0.331631 s
+[ISOLATED-PROCESS][2024-09-25 18:53:39,285][latency][INFO] - + forward latency:
+[ISOLATED-PROCESS][2024-09-25 18:53:39,285][latency][INFO] - - count: 266
+[ISOLATED-PROCESS][2024-09-25 18:53:39,285][latency][INFO] - - total: 10.021863 s
+[ISOLATED-PROCESS][2024-09-25 18:53:39,285][latency][INFO] - - mean: 0.037676 s
+[ISOLATED-PROCESS][2024-09-25 18:53:39,286][latency][INFO] - - stdev: 0.008922 s (23.68%)
+[ISOLATED-PROCESS][2024-09-25 18:53:39,286][latency][INFO] - - p50: 0.035970 s
+[ISOLATED-PROCESS][2024-09-25 18:53:39,286][latency][INFO] - - p90: 0.043994 s
+[ISOLATED-PROCESS][2024-09-25 18:53:39,286][latency][INFO] - - p95: 0.046884 s
+[ISOLATED-PROCESS][2024-09-25 18:53:39,287][latency][INFO] - - p99: 0.073021 s
+[ISOLATED-PROCESS][2024-09-25 18:53:39,288][latency][INFO] - + forward throughput: 53.083941 samples/s
+[ISOLATED-PROCESS][2024-09-25 18:53:39,296][process][INFO] - + Sending report to main process
+[ISOLATED-PROCESS][2024-09-25 18:53:39,297][process][INFO] - + Exiting isolated process
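The log's load and forward sections are straightforward aggregates over per-call latency samples. A rough numpy reconstruction of that arithmetic, using synthetic latencies since the raw samples are not in the log; the batch size at the end is an inference, not something the log states:

import numpy as np

rng = np.random.default_rng(42)
# Synthetic stand-ins for the 266 tracked forward-call latencies (seconds).
latencies = rng.lognormal(mean=np.log(0.036), sigma=0.2, size=266)

print(f"count: {latencies.size}")
print(f"total: {latencies.sum():.6f} s")
print(f"mean: {latencies.mean():.6f} s")
print(f"stdev: {latencies.std():.6f} s ({100 * latencies.std() / latencies.mean():.2f}%)")
for q in (50, 90, 95, 99):
    print(f"p{q}: {np.percentile(latencies, q):.6f} s")

# Throughput counts samples rather than calls: 266 calls over 10.02 s is
# ~26.5 calls/s, so the logged 53.08 samples/s would imply each forward pass
# carried a batch of 2 (an assumption; input shapes are not in the log).
batch_size = 2
print(f"throughput: {batch_size * latencies.size / latencies.sum():.6f} samples/s")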
config_store.py
CHANGED
@@ -88,6 +88,10 @@ def get_openvino_config():
     }
 
 
+def get_ipex_config():
+    return {}
+
+
 def get_inference_config():
     return {
         "inference.warmup_runs": gr.Slider(
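get_ipex_config returning an empty dict means the IPEX backend runs with pure defaults while still composing with the *ipex_config.values() spread in button.click. The other getters follow the convention visible above: dict keys (and component labels) are "backend.argument" strings that run_benchmark later splits on the dot. A hypothetical getter in that style, with made-up arguments purely for illustration:

import gradio as gr

def get_example_config():
    # Hypothetical backend config getter; the "example.*" labels follow the
    # "backend.argument" convention that run_benchmark splits on ".".
    return {
        "example.no_weights": gr.Checkbox(
            value=False,
            label="example.no_weights",
            info="Run with randomly initialized weights.",
        ),
        "example.torch_dtype": gr.Dropdown(
            choices=["float32", "bfloat16"],
            value="float32",
            label="example.torch_dtype",
            info="Data type used for the model.",
        ),
    }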
packages.txt
ADDED
(new empty file)
pyproject.toml
DELETED
@@ -1,3 +0,0 @@
-[tool.black]
-line-length = 119
-target-version = ['py37']