Better app, thx Claude
Files changed:
- app.py: +90 -25
- requirements.txt: +2 -1
app.py
CHANGED
@@ -1,40 +1,105 @@
 import gradio as gr
 import pandas as pd
+import requests
+import json
+import tiktoken
 
+PRICES_URL = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"
 
+# Ensure TOKEN_COSTS is up to date when the module is loaded
+try:
+    response = requests.get(PRICES_URL)
+    if response.status_code == 200:
+        TOKEN_COSTS = response.json()
+    else:
+        raise Exception(f"Failed to fetch token costs, status code: {response.status_code}")
+except Exception as e:
+    print(f'Failed to update token costs with error: {e}. Using static costs.')
+    with open("model_prices.json", "r") as f:
+        TOKEN_COSTS = json.load(f)
 
+TOKEN_COSTS = pd.DataFrame.from_dict(TOKEN_COSTS, orient='index').reset_index()
+TOKEN_COSTS.columns = ['model'] + list(TOKEN_COSTS.columns[1:])
 
+def count_string_tokens(string: str, model: str) -> int:
+    """Returns the number of tokens in a text string."""
+    try:
+        encoding = tiktoken.encoding_for_model(model.split('/')[-1])
+    except KeyError:
+        print(f"Model {model} not found. Using cl100k_base encoding.")
+        encoding = tiktoken.get_encoding("cl100k_base")
+    return len(encoding.encode(string))
 
+def calculate_total_cost(prompt_tokens: int, completion_tokens: int, model: str) -> float:
+    """Calculate the total cost for a given model and number of tokens."""
+    model_data = TOKEN_COSTS[TOKEN_COSTS['model'] == model].iloc[0]
+    prompt_cost = prompt_tokens * model_data['input_cost_per_token']
+    completion_cost = completion_tokens * model_data['output_cost_per_token']
+    return prompt_cost + completion_cost
 
+def update_model_list(function_calling, litellm_provider, max_price):
+    filtered_models = TOKEN_COSTS[
+        (TOKEN_COSTS['supports_function_calling'] == function_calling) &
+        (TOKEN_COSTS['litellm_provider'] == litellm_provider) &
+        (TOKEN_COSTS['input_cost_per_token'] + TOKEN_COSTS['output_cost_per_token'] <= max_price)
+    ]
+    return filtered_models['model'].tolist()
 
+def compute_all(prompt_string, completion_string, model):
+    prompt_tokens = count_string_tokens(prompt_string, model)
+    completion_tokens = count_string_tokens(completion_string, model)
+    cost = calculate_total_cost(prompt_tokens, completion_tokens, model)
+    prompt_cost = prompt_tokens * TOKEN_COSTS[TOKEN_COSTS['model'] == model]['input_cost_per_token'].values[0]
+    completion_cost = completion_tokens * TOKEN_COSTS[TOKEN_COSTS['model'] == model]['output_cost_per_token'].values[0]
+
+    return (
+        f"{prompt_tokens} tokens",
+        f"${prompt_cost:.6f}",
+        f"{completion_tokens} tokens",
+        f"${completion_cost:.6f}",
+        f"${cost:.6f}"
+    )
 
+with gr.Blocks(theme=gr.themes.Soft()) as demo:
+    gr.Markdown("""
+    # Text-to-$$$: Calculate the price of your LLM runs
+    Based on data from [litellm](https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json).
+    """)
+
+    with gr.Row():
+        with gr.Column(scale=2):
-prompt = gr.Textbox(value="Lorem ipsum dolor sit amet...")
-completion = gr.Textbox(value="...")
+            prompt = gr.Textbox(label="Prompt", value="Tell me a joke about AI.", lines=3)
+            completion = gr.Textbox(label="Completion", value="Here's a joke about AI: Why did the AI go to therapy? It had too many deep issues!", lines=3)
+
             with gr.Row():
+                function_calling = gr.Checkbox(label="Supports Function Calling")
+                litellm_provider = gr.Dropdown(label="LiteLLM Provider", choices=TOKEN_COSTS['litellm_provider'].unique().tolist())
+
+            max_price = gr.Slider(label="Max Price per Token (input + output)", minimum=0, maximum=0.001, step=0.00001, value=0.001)
+
+            model = gr.Dropdown(label="Model", choices=TOKEN_COSTS['model'].tolist())
+
+            compute_button = gr.Button("Compute Costs", variant="primary")
+
+        with gr.Column(scale=1):
+            with gr.Group():
+                prompt_tokens = gr.Textbox(label="Prompt Tokens", interactive=False)
+                prompt_cost = gr.Textbox(label="Prompt Cost", interactive=False)
+                completion_tokens = gr.Textbox(label="Completion Tokens", interactive=False)
+                completion_cost = gr.Textbox(label="Completion Cost", interactive=False)
+                total_cost = gr.Textbox(label="Total Cost", interactive=False)
+
+    # Update model list based on criteria
+    function_calling.change(update_model_list, inputs=[function_calling, litellm_provider, max_price], outputs=model)
+    litellm_provider.change(update_model_list, inputs=[function_calling, litellm_provider, max_price], outputs=model)
+    max_price.change(update_model_list, inputs=[function_calling, litellm_provider, max_price], outputs=model)
 
+    # Compute costs
+    compute_button.click(
+        compute_all,
+        inputs=[prompt, completion, model],
+        outputs=[prompt_tokens, prompt_cost, completion_tokens, completion_cost, total_cost]
+    )
 
 if __name__ == "__main__":
     demo.launch()
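The cost helpers added above can also be exercised without launching the Gradio UI. A minimal sketch, assuming app.py is importable from the working directory and that "gpt-3.5-turbo" is present in the fetched litellm price table (the model name here is illustrative, not part of the commit):

# Sketch: reuse the cost helpers from app.py without the UI.
# Importing app runs the module-level price fetch; demo.launch() stays guarded by __main__.
from app import count_string_tokens, calculate_total_cost

prompt = "Tell me a joke about AI."
completion = "Why did the AI go to therapy? It had too many deep issues!"
model = "gpt-3.5-turbo"  # assumed to exist in the price table

prompt_tokens = count_string_tokens(prompt, model)
completion_tokens = count_string_tokens(completion, model)
total = calculate_total_cost(prompt_tokens, completion_tokens, model)
print(f"{prompt_tokens} prompt tokens, {completion_tokens} completion tokens, ${total:.6f} total")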
requirements.txt
CHANGED
@@ -1 +1,2 @@
-pandas
+pandas
+tiktoken
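The updated app.py also imports requests at module load; the Spaces runtime typically provides it, but a local run may need it installed alongside the packages pinned here. A small sanity-check sketch (not part of the commit) for confirming the fetched price table exposes the fields the UI filters on:

# Sketch: verify the litellm price table has the columns app.py relies on.
import requests

PRICES_URL = "https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json"
data = requests.get(PRICES_URL, timeout=10).json()

required = {"input_cost_per_token", "output_cost_per_token", "litellm_provider"}
incomplete = [m for m, row in data.items() if isinstance(row, dict) and not required <= row.keys()]
print(f"{len(data)} entries fetched; {len(incomplete)} missing at least one of {sorted(required)}")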