jadehardouin committed
Commit • 2b78da8 • 1 Parent(s): 437efc5
Update app.py

app.py CHANGED
@@ -1,5 +1,8 @@
 import gradio as gr
 import pandas as pd
+import matplotlib.pyplot as plt
+import io
+import base64
 
 text = "<h1 style='text-align: center; color: blue; font-size: 30px;'>TCO Comparison Calculator"
 text1 = "<h1 style='text-align: center; color: blue; font-size: 20px;'>First solution"
@@ -88,25 +91,26 @@ def update_plot(diy_value, saas_value):
     )
     return gr.BarPlot.update(data, x="Solution", y="Cost/token ($)")
 
+def render_latex(latex_str):
+    fig, ax = plt.subplots(figsize=(1, 1))
+    ax.text(0.5, 0.5, f"${latex_str}$", size=12, usetex=True, va="center", ha="center")
+    ax.axis("off")
+
+    buf = io.BytesIO()
+    plt.savefig(buf, format="png")
+    plt.close(fig)
+
+    base64_str = base64.b64encode(buf.getvalue()).decode("utf-8")
+    return f"<img src='data:image/png;base64,{base64_str}'>"
+
 description=f"""
 <p>In this demo application, we help you compare different solutions for your AI incorporation plans, such as open-source or SaaS.</p>
 <p>First, you'll have to choose the two solutions you'd like to compare. Then, follow the instructions to select your configurations for each solution and we will compute the cost/request accordingly to them. Eventually, you can compare both solutions to evaluate which one best suits your needs, in the short or long term.</p>
 """
-
-
-description2_intro="This interface provides you with the cost per token resulting from the AI model provider you choose and the number of tokens you select for context, which the model will take into account when processing input texts."
+description1="This interface provides you with the cost per token you get using the open-source solution, based on the model you choose to use and how long you're planning to use it. The selected prices for a Virtual Machine rental come from Azure's VM rental plans, which can offer reductions for long-term reserved usage."
+description2="This interface provides you with the cost per token resulting from the AI model provider you choose and the number of tokens you select for context, which the model will take into account when processing input texts."
 description3="This interface compares the cost per request for the two solutions you selected and gives you an insight of whether a solution is more valuable in the long term."
 
-latex_formula = r"$ saas\_cost\_per\_request = saas\_cost\_per\_token \times tokens\_per\_request$"
-latex_formula2 = r"$ open\-source\_cost\_per\_request = \frac{tokens\_per\_request \times VM\_cost\_per\_hour \times (1 - reduction)}{tokens\_per\_second \times 3600 \times maxed\_out \times used}$"
-
-tooltip_html = "<span style='color: blue; cursor: help;' title='The formula used for computations is here:{latex_formula}'>Hover here to see formula</span>"
-tooltip_html2 = "<span style='color: blue; cursor: help;' title='The formula used for computations is here:{latex_formula2} with a VM A100 40GB, supposedly maxed out at 80% and utilized 50% of the day'>Hover here to see formula</span>"
-
-description1 = f"{description1_intro} {tooltip_html2}"
-description2 = f"{description2_intro} {tooltip_html}"
-
-
 models = ["Llama-2-7B", "Llama-2-13B", "Llama-2-70B"]
 vm_rental_choice = ["pay as you go", "1 year reserved", "3 years reserved"]
 model_provider = ["OpenAI"]
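The render_latex helper added in the hunk above rasterizes a LaTeX string with matplotlib and returns it as a base64-encoded data-URI <img> tag; note that passing usetex=True to ax.text requires a working LaTeX installation in the runtime environment. For reference, here is a minimal self-contained sketch of the same technique that relies on matplotlib's built-in mathtext instead, so no LaTeX toolchain is needed. The name latex_to_img_tag and the example formula are illustrative, not part of this commit, whose visible hunks ultimately display their formulas with gr.Markdown inside accordions.

import base64
import io

import matplotlib
matplotlib.use("Agg")  # headless backend, suitable for a server or Space
import matplotlib.pyplot as plt


def latex_to_img_tag(math_str):
    # Render a math string to a PNG with mathtext (no usetex, no external LaTeX needed)
    fig, ax = plt.subplots(figsize=(4, 1))
    ax.text(0.5, 0.5, f"${math_str}$", size=12, va="center", ha="center")
    ax.axis("off")

    buf = io.BytesIO()
    fig.savefig(buf, format="png", bbox_inches="tight")
    plt.close(fig)

    encoded = base64.b64encode(buf.getvalue()).decode("utf-8")
    return f"<img src='data:image/png;base64,{encoded}'>"


# The returned tag can be fed to an HTML-capable component, e.g. gr.HTML(...)
print(latex_to_img_tag(r"\frac{a \times b}{c}")[:60])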
@@ -125,7 +129,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
     with gr.Row():
         with gr.Column():
 
-            solution_selection = gr.Dropdown(["SaaS", "Open-source"], label="Select a Solution")
+            solution_selection = gr.Dropdown(["SaaS", "Open-source"], label="Select a Solution", value="SaaS")
 
             with gr.Row(visible=False) as title_column:
                 gr.Markdown(value=text1)
@@ -133,30 +137,44 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
             with gr.Row(visible=False) as text_diy_column:
                 gr.Markdown(description1)
 
+            with gr.Accordion("Open to see the formula", visible=False, open=False) as formula_diy:
+                gr.Markdown(
+                    r"$ opensource\_cost\_per\_request = \frac{tokens\_per\_request \times VM\_cost\_per\_hour \times (1 - reduction)}{tokens\_per\_second \times 3600 \times maxed\_out \times used}$"
+                )
+
             with gr.Row(visible=False) as input_diy_column:
-                model_inp = gr.Dropdown(models, label="Select an AI Model")
-                rental_plan_inp = gr.Dropdown(vm_rental_choice, label="Select a VM Rental Plan")
+                model_inp = gr.Dropdown(models, label="Select an AI Model", value="Llama-2-7B", info="Open-source AI model used for your application")
+                rental_plan_inp = gr.Dropdown(vm_rental_choice, label="Select a VM Rental Plan", value="pay as you go", info="These options are from Azure's VM rental plans")
                 rental_plan_inp.change(fn=calculate_tco, inputs=[model_inp, rental_plan_inp, out_diy], outputs=out_diy)
 
             with gr.Row(visible=False) as text_saas_column:
                 gr.Markdown(description2)
 
+            with gr.Accordion("Open to see the formula", visible=False, open=False) as formula_saas:
+                gr.Markdown(
+                    r"$ saas\_cost\_per\_request = saas\_cost\_per\_token \times tokens\_per\_request$"
+                )
+
             with gr.Row(visible=False) as input_saas_column:
-                model_provider_inp = gr.Dropdown(model_provider, label="Model Provider")
-                context_inp = gr.Dropdown(context, label="Context")
+                model_provider_inp = gr.Dropdown(model_provider, label="Model Provider", value="OpenAI", info="Choose an AI model provider you want to work with")
+                context_inp = gr.Dropdown(context, label="Context", value="4K context", info="Number of tokens the model considers when processing text")
                 context_inp.change(fn=calculate_tco_2, inputs=[model_provider_inp, context_inp, out_saas], outputs=out_saas)
 
             def submit(solution_selection):
                 if solution_selection == "Open-source":
                     return {
+                        formula_diy: gr.update(visible=True),
                         title_column: gr.update(visible=True),
                         text_diy_column: gr.update(visible=True),
                         input_diy_column: gr.update(visible=True),
+                        formula_saas: gr.update(visible=False),
                         text_saas_column: gr.update(visible=False),
                         input_saas_column: gr.update(visible=False),
                     }
                 else:
                     return {
+                        formula_saas: gr.update(visible=True),
+                        formula_diy: gr.update(visible=False),
                         text_diy_column: gr.update(visible=False),
                         input_diy_column: gr.update(visible=False),
                         title_column: gr.update(visible=True),
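To make the two formulas in the accordions above concrete, here is the arithmetic with illustrative inputs. The 80% maxed-out factor, the 50% daily-utilization factor and the A100 40GB VM come from the tooltip text removed earlier in this diff; the request size, throughput and prices below are assumed placeholder values, not figures taken from the app.

# Open-source: cost_per_request = tokens_per_request * VM_cost_per_hour * (1 - reduction)
#                                 / (tokens_per_second * 3600 * maxed_out * used)
tokens_per_request = 1000   # assumed average request size, in tokens
vm_cost_per_hour = 3.70     # assumed hourly rate for an A100 40GB VM
reduction = 0.0             # "pay as you go": no reserved-instance discount
tokens_per_second = 30      # assumed generation throughput
maxed_out = 0.8             # VM maxed out at 80% (from the removed tooltip)
used = 0.5                  # VM utilized 50% of the day (from the removed tooltip)

oss_cost = tokens_per_request * vm_cost_per_hour * (1 - reduction) / (
    tokens_per_second * 3600 * maxed_out * used
)
print(f"open-source: {oss_cost:.4f} $/request")  # ~0.0856 $/request with these inputs

# SaaS: cost_per_request = cost_per_token * tokens_per_request
saas_cost_per_token = 0.000002  # assumed price per token
print(f"SaaS: {saas_cost_per_token * tokens_per_request:.4f} $/request")  # 0.0020 $/request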
@@ -167,14 +185,14 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
             solution_selection.change(
                 submit,
                 solution_selection,
-                [out_saas, text_diy_column, title_column, text_saas_column, model_inp, rental_plan_inp, model_provider_inp, context_inp, input_diy_column, input_saas_column],
+                [out_saas, text_diy_column, formula_diy, formula_saas, title_column, text_saas_column, model_inp, rental_plan_inp, model_provider_inp, context_inp, input_diy_column, input_saas_column],
             )
 
             # gr.Divider(style="vertical", thickness=2, color="blue")
 
         with gr.Column():
 
-            solution_selection2 = gr.Dropdown(["SaaS", "Open-source"], label="Select a Solution")
+            solution_selection2 = gr.Dropdown(["SaaS", "Open-source"], value="Open-source", label="Select a Solution")
 
             with gr.Row(visible=False) as title_column2:
                 gr.Markdown(value=text2)
@@ -182,33 +200,47 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
             with gr.Row(visible=False) as text_diy_column2:
                 gr.Markdown(description1)
 
+            with gr.Accordion("Open to see the formula", visible=False, open=False) as formula_diy2:
+                gr.Markdown(
+                    r"$ homemade\_cost\_per\_request = \frac{tokens\_per\_request \times VM\_cost\_per\_hour \times (1 - reduction)}{tokens\_per\_second \times 3600 \times maxed\_out \times used}$"
+                )
+
             with gr.Row(visible=False) as input_diy_column2:
-                model_inp2 = gr.Dropdown(models, label="Select an AI Model")
-                rental_plan_inp2 = gr.Dropdown(vm_rental_choice, label="Select a VM Rental Plan")
+                model_inp2 = gr.Dropdown(models, label="Select an AI Model", value="Llama-2-7B", info="Open-source AI model used for your application")
+                rental_plan_inp2 = gr.Dropdown(vm_rental_choice, label="Select a VM Rental Plan", value="pay as you go", info="These options are from Azure's VM rental plans")
                 rental_plan_inp2.change(fn=calculate_tco, inputs=[model_inp2, rental_plan_inp2, out_diy2], outputs=out_diy2)
 
             with gr.Row(visible=False) as text_saas_column2:
                 gr.Markdown(description2)
 
+            with gr.Accordion("Open to see the formula", visible=False, open=False) as formula_saas2:
+                gr.Markdown(
+                    r"$ saas\_cost\_per\_request = saas\_cost\_per\_token \times tokens\_per\_request$"
+                )
+
             with gr.Row(visible=False) as input_saas_column2:
-                model_provider_inp2 = gr.Dropdown(['OpenAI'], label="Model Provider")
-                context_inp2 = gr.Dropdown(['4K context', '16K context'], label="Context")
+                model_provider_inp2 = gr.Dropdown(['OpenAI'], label="Model Provider", value="OpenAI", info="Choose an AI model provider you want to work with")
+                context_inp2 = gr.Dropdown(['4K context', '16K context'], label="Context", value="4K context", info="Number of tokens the model considers when processing text")
                 context_inp2.change(fn=calculate_tco_2, inputs=[model_provider_inp2, context_inp2, out_saas2], outputs=out_saas2)
 
             def submit2(solution_selection2):
                 if solution_selection2 == "Open-source":
                     return {
+                        formula_diy2: gr.update(visible=True),
                         title_column2: gr.update(visible=True),
                         text_diy_column2: gr.update(visible=True),
                         input_diy_column2: gr.update(visible=True),
+                        formula_saas2: gr.update(visible=False),
                         text_saas_column2: gr.update(visible=False),
                         input_saas_column2: gr.update(visible=False),
                     }
                 else:
                     return {
+                        formula_diy2: gr.update(visible=False),
                         text_diy_column2: gr.update(visible=False),
                         input_diy_column2: gr.update(visible=False),
                         title_column2: gr.update(visible=True),
+                        formula_saas2: gr.update(visible=True),
                         text_saas_column2: gr.update(visible=True),
                         input_saas_column2: gr.update(visible=True),
                     }
@@ -216,7 +248,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
             solution_selection2.change(
                 submit2,
                 solution_selection2,
-                [out_diy2, out_saas2, title_column2, text_diy_column2, text_saas_column2, model_inp2, rental_plan_inp2, model_provider_inp2, context_inp2, input_diy_column2, input_saas_column2],
+                [out_diy2, out_saas2, formula_diy2, formula_saas2, title_column2, text_diy_column2, text_saas_column2, model_inp2, rental_plan_inp2, model_provider_inp2, context_inp2, input_diy_column2, input_saas_column2],
             )
 
     with gr.Row():
@@ -226,16 +258,16 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
         gr.Markdown(text3)
 
     with gr.Row():
-        plot = gr.BarPlot(title="Comparison", y_title="Cost/token ($)",
-
-
-
-
-
-
-
-
-
+        plot = gr.BarPlot(vertical=False, title="Comparison", y_title="Cost/token ($)", width=500, interactive=True)
+
+        context_inp.change(fn=update_plot, inputs=[out_diy2, out_saas], outputs=plot)
+        model_provider_inp.change(fn=update_plot, inputs=[out_diy2, out_saas], outputs=plot)
+        rental_plan_inp2.change(fn=update_plot, inputs=[out_diy2, out_saas], outputs=plot)
+        model_inp2.change(fn=update_plot, inputs=[out_diy2, out_saas], outputs=plot)
+
+        context_inp2.change(fn=update_plot, inputs=[out_diy, out_saas2], outputs=plot)
+        model_provider_inp2.change(fn=update_plot, inputs=[out_diy, out_saas2], outputs=plot)
+        rental_plan_inp.change(fn=update_plot, inputs=[out_diy, out_saas2], outputs=plot)
+        model_inp.change(fn=update_plot, inputs=[out_diy, out_saas2], outputs=plot)
 
 demo.launch()
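All of the input widgets above are wired to update_plot, whose body lies outside the changed hunks; only its closing return gr.BarPlot.update(...) line is visible at the top of this diff. Under the assumption that it simply packs the two cost outputs into a DataFrame for the bar plot, a compatible sketch would look like this (an illustration, not the committed implementation):

import gradio as gr
import pandas as pd


def update_plot(diy_value, saas_value):
    # Assumed reconstruction: build the two-bar dataset the BarPlot expects
    data = pd.DataFrame(
        {
            "Solution": ["Open-source", "SaaS"],
            "Cost/token ($)": [diy_value, saas_value],
        }
    )
    # Matches the return statement shown in the @@ -88,25 +91,26 @@ hunk
    return gr.BarPlot.update(data, x="Solution", y="Cost/token ($)")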