m-ric HF staff committed on
Commit
b9296d6
·
1 Parent(s): 099bf82

Add vision support

Browse files
Files changed (1) hide show
  1. app.py +20 -15
app.py CHANGED
@@ -21,6 +21,7 @@ except Exception as e:
21
# Normalize the raw litellm token-cost mapping into a tidy DataFrame:
# one row per model, model id promoted from the index into a 'model' column.
TOKEN_COSTS = pd.DataFrame.from_dict(TOKEN_COSTS, orient='index').reset_index()
TOKEN_COSTS = TOKEN_COSTS.rename(columns={TOKEN_COSTS.columns[0]: 'model'})
# Drop litellm's placeholder "sample_spec" row — it is not a real model.
TOKEN_COSTS = TOKEN_COSTS.loc[~TOKEN_COSTS['model'].str.contains("sample_spec")]
 
24
 
25
 
26
  def count_string_tokens(string: str, model: str) -> int:
@@ -37,15 +38,17 @@ def calculate_total_cost(prompt_tokens: int, completion_tokens: int, model: str)
37
  completion_cost = completion_tokens * model_data['output_cost_per_token']
38
  return prompt_cost, completion_cost
39
 
40
def update_model_list(function_calling, litellm_provider, max_price):
    """Return a refreshed Gradio model Dropdown filtered by provider.

    Parameters:
        function_calling: "Supports Function Calling" checkbox value.
            NOTE(review): currently unused — no tool-calling column is
            visible in TOKEN_COSTS here; confirm the column name before
            filtering on it.
        litellm_provider: provider name, or "Any" for no provider filter.
        max_price: max price per token slider value.
            NOTE(review): currently unused as well.

    Returns:
        gr.Dropdown with the filtered model list; the first model is
        pre-selected.
    """
    if litellm_provider == "Any":
        # Bug fix: the original returned value=list_models[0] here, but
        # list_models was only assigned in the other branch (NameError).
        list_models = TOKEN_COSTS['model'].tolist()
    else:
        filtered_models = TOKEN_COSTS[
            TOKEN_COSTS['litellm_provider'] == litellm_provider
        ]
        list_models = filtered_models['model'].tolist()
    return gr.Dropdown(choices=list_models, value=list_models[0])
 
 
49
 
50
  def compute_all(input_type, prompt_text, completion_text, prompt_tokens, completion_tokens, model):
51
  if input_type == "Text Input":
@@ -85,7 +88,9 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue=gr.themes.colors.yellow, seconda
85
 
86
  gr.Markdown("## Model choice:")
87
  with gr.Row():
88
- function_calling = gr.Checkbox(label="Supports Function Calling", value=False)
 
 
89
  litellm_provider = gr.Dropdown(label="LiteLLM Provider", choices=["Any"] + TOKEN_COSTS['litellm_provider'].unique().tolist(), value="Any")
90
 
91
  max_price = gr.Slider(label="Max Price per Token (input + output)", minimum=0, maximum=0.001, step=0.00001, value=0.001)
@@ -113,9 +118,10 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue=gr.themes.colors.yellow, seconda
113
  )
114
 
115
  # Update model list based on criteria
116
- function_calling.change(update_model_list, inputs=[function_calling, litellm_provider, max_price], outputs=model)
117
- litellm_provider.change(update_model_list, inputs=[function_calling, litellm_provider, max_price], outputs=model)
118
- max_price.change(update_model_list, inputs=[function_calling, litellm_provider, max_price], outputs=model)
 
119
 
120
  # Compute costs
121
  compute_button.click(
@@ -132,5 +138,4 @@ with gr.Blocks(theme=gr.themes.Soft(primary_hue=gr.themes.colors.yellow, seconda
132
  )
133
 
134
  if __name__ == "__main__":
135
- demo.launch()
136
-
 
21
# Reshape the raw litellm token-cost dict into a DataFrame keyed by a
# 'model' column instead of the index.
TOKEN_COSTS = pd.DataFrame.from_dict(TOKEN_COSTS, orient='index').reset_index()
TOKEN_COSTS = TOKEN_COSTS.rename(columns={TOKEN_COSTS.columns[0]: 'model'})
# Keep only real, priced models: drop the "sample_spec" placeholder row
# and any model that has no input price.
usable = (
    ~TOKEN_COSTS['model'].str.contains("sample_spec")
    & TOKEN_COSTS['input_cost_per_token'].notnull()
)
TOKEN_COSTS = TOKEN_COSTS.loc[usable]
25
 
26
 
27
  def count_string_tokens(string: str, model: str) -> int:
 
38
  completion_cost = completion_tokens * model_data['output_cost_per_token']
39
  return prompt_cost, completion_cost
40
 
41
def update_model_list(function_calling, litellm_provider, max_price, supports_vision):
    """Return a refreshed Gradio model Dropdown filtered by the user's criteria.

    Parameters:
        function_calling: "Supports Tool Calling" checkbox value.
            NOTE(review): currently unused — the tool-calling column name is
            not visible in TOKEN_COSTS here; confirm it (e.g.
            'supports_function_calling') before filtering on it.
        litellm_provider: provider name, or "Any" for no provider filter.
        max_price: maximum combined (input + output) cost per token.
        supports_vision: if True, keep only vision-capable models.

    Returns:
        gr.Dropdown with the filtered models; the first match is selected,
        or a "not found" placeholder when nothing matches.
    """
    filtered_models = TOKEN_COSTS

    if litellm_provider != "Any":
        filtered_models = filtered_models[filtered_models['litellm_provider'] == litellm_provider]

    if supports_vision:
        filtered_models = filtered_models[filtered_models['supports_vision']]

    # Bug fix: max_price was accepted (and wired to the slider's .change
    # event) but never applied, so the price slider had no effect. Filter on
    # combined input+output cost per token, matching the slider label
    # "Max Price per Token (input + output)".
    filtered_models = filtered_models[
        (filtered_models['input_cost_per_token'] + filtered_models['output_cost_per_token']) <= max_price
    ]

    list_models = filtered_models['model'].tolist()
    return gr.Dropdown(choices=list_models, value=list_models[0] if list_models else "No model found for this combination!")
52
 
53
  def compute_all(input_type, prompt_text, completion_text, prompt_tokens, completion_tokens, model):
54
  if input_type == "Text Input":
 
88
 
89
  gr.Markdown("## Model choice:")
90
  with gr.Row():
91
+ with gr.Column():
92
+ function_calling = gr.Checkbox(label="Supports Tool Calling", value=False)
93
+ supports_vision = gr.Checkbox(label="Supports Vision", value=False)
94
  litellm_provider = gr.Dropdown(label="LiteLLM Provider", choices=["Any"] + TOKEN_COSTS['litellm_provider'].unique().tolist(), value="Any")
95
 
96
  max_price = gr.Slider(label="Max Price per Token (input + output)", minimum=0, maximum=0.001, step=0.00001, value=0.001)
 
118
  )
119
 
120
  # Update model list based on criteria
121
+ function_calling.change(update_model_list, inputs=[function_calling, litellm_provider, max_price, supports_vision], outputs=model)
122
+ litellm_provider.change(update_model_list, inputs=[function_calling, litellm_provider, max_price, supports_vision], outputs=model)
123
+ max_price.change(update_model_list, inputs=[function_calling, litellm_provider, max_price, supports_vision], outputs=model)
124
+ supports_vision.change(update_model_list, inputs=[function_calling, litellm_provider, max_price, supports_vision], outputs=model)
125
 
126
  # Compute costs
127
  compute_button.click(
 
138
  )
139
 
140
  if __name__ == "__main__":
141
+ demo.launch()