import gradio as gr


def calculate_tco(model_choice, vm_rental_choice):
    # Hourly rental price of the GPU VM used for the self-hosted deployment
    VM_cost_per_hour = 3.6730
    # Average number of tokens generated per request
    tokens_per_request = 64

    # Generation throughput (tokens/second) assumed for each model
    if model_choice == "Llama-2-7B":
        tokens_per_second = 694.38
    elif model_choice == "Llama-2-13B":
        tokens_per_second = 1000
    elif model_choice == "Llama-2-70B":
        tokens_per_second = 10000

    # Longer rental plans come with a price reduction; plans up to 6 months
    # get no discount
    if vm_rental_choice in ("1 month", "3 months", "6 months"):
        reduction = 0
    elif vm_rental_choice == "1 year":
        reduction = 0.34
    elif vm_rental_choice == "3 years":
        reduction = 0.62

    # Cost per request when self-hosting: discounted hourly VM price divided
    # by the number of tokens produced per hour, times the tokens per request
    homemade_cost_per_request = VM_cost_per_hour * (1 - reduction) * tokens_per_request / (tokens_per_second * 3600)
    # Cost per request with the SaaS API, at an assumed price of $0.018 per token
    saas_cost_per_request = 0.018 * tokens_per_request

    output = f"Cost/request with a plan for {vm_rental_choice} using:\n"
    output += f"- SaaS solution from OpenAI: ${saas_cost_per_request:.5f}\n"
    output += f"- Home-made solution with the model {model_choice}: ${homemade_cost_per_request:.5f}"
    return output
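
# A quick sanity check of calculate_tco outside the Gradio UI; the model/plan
# pair here is just an illustrative choice, not a recommendation.
print(calculate_tco("Llama-2-13B", "1 year"))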

models = ["Llama-2-7B", "Llama-2-13B", "Llama-2-70B"]
vm_rental_option = ["1 month", "3 months", "6 months", "1 year", "3 years"]

# Use the components directly (gr.Dropdown, gr.Textbox); the old
# gr.inputs / gr.outputs namespaces are gone in current Gradio releases
iface = gr.Interface(
    fn=calculate_tco,
    inputs=[
        gr.Dropdown(choices=models, label="Select AI Model"),
        gr.Dropdown(choices=vm_rental_option, label="Select VM Rental Duration"),
    ],
    outputs=gr.Textbox(),
)

iface.launch()