Spaces:
Runtime error
Runtime error
gabrielaltay
committed on
Commit
•
43b5ceb
1
Parent(s):
15b02c1
unify token counting
Browse files- app.py +1 -1
- sidebar_mod.py +7 -2
- usage_mod.py +23 -21
app.py
CHANGED
@@ -305,7 +305,7 @@ def render_response(
|
|
305 |
st.info(response_text)
|
306 |
|
307 |
usage_mod.display_api_usage(
|
308 |
-
response["aimessage"]
|
309 |
)
|
310 |
doc_format_mod.render_retrieved_chunks(response["docs"], tag=tag)
|
311 |
|
|
|
305 |
st.info(response_text)
|
306 |
|
307 |
usage_mod.display_api_usage(
|
308 |
+
response["aimessage"], model_info, provider, tag=tag
|
309 |
)
|
310 |
doc_format_mod.render_retrieved_chunks(response["docs"], tag=tag)
|
311 |
|
sidebar_mod.py
CHANGED
@@ -8,13 +8,18 @@ def render_outreach_links():
|
|
8 |
hf_url = "https://huggingface.co/hyperdemocracy"
|
9 |
pc_url = "https://www.pinecone.io/blog/serverless"
|
10 |
together_url = "https://www.together.ai/"
|
|
|
|
|
|
|
11 |
st.subheader(":brain: About [hyperdemocracy](https://hyperdemocracy.us)")
|
12 |
st.subheader(f":world_map: Visualize [nomic atlas]({nomic_url})")
|
13 |
st.subheader(f":hugging_face: Raw [huggingface datasets]({hf_url})")
|
14 |
st.subheader(f":evergreen_tree: Index [pinecone serverless]({pc_url})")
|
15 |
st.subheader(f":pancakes: Inference [together.ai]({together_url})")
|
16 |
-
|
17 |
-
|
|
|
|
|
18 |
def render_sidebar():
|
19 |
with st.container(border=True):
|
20 |
render_outreach_links()
|
|
|
8 |
hf_url = "https://huggingface.co/hyperdemocracy"
|
9 |
pc_url = "https://www.pinecone.io/blog/serverless"
|
10 |
together_url = "https://www.together.ai/"
|
11 |
+
google_gemini_url = "https://ai.google.dev/gemini-api"
|
12 |
+
anthropic_url = "https://www.anthropic.com/api"
|
13 |
+
openai_url = "https://platform.openai.com/docs/overview"
|
14 |
st.subheader(":brain: About [hyperdemocracy](https://hyperdemocracy.us)")
|
15 |
st.subheader(f":world_map: Visualize [nomic atlas]({nomic_url})")
|
16 |
st.subheader(f":hugging_face: Raw [huggingface datasets]({hf_url})")
|
17 |
st.subheader(f":evergreen_tree: Index [pinecone serverless]({pc_url})")
|
18 |
st.subheader(f":pancakes: Inference [together.ai]({together_url})")
|
19 |
+
st.subheader(f":eyeglasses: Inference [google-gemini]({google_gemini_url})")
|
20 |
+
st.subheader(f":hut: Inference [anthropic]({anthropic_url})")
|
21 |
+
st.subheader(f":sparkles: Inference [openai]({openai_url})")
|
22 |
+
|
23 |
def render_sidebar():
|
24 |
with st.container(border=True):
|
25 |
render_outreach_links()
|
usage_mod.py
CHANGED
@@ -1,9 +1,10 @@
|
|
1 |
import streamlit as st
|
|
|
2 |
|
3 |
|
4 |
-
def get_openai_token_usage(
|
5 |
-
input_tokens =
|
6 |
-
output_tokens =
|
7 |
cost = (
|
8 |
input_tokens * 1e-6 * model_info["cost"]["pmi"]
|
9 |
+ output_tokens * 1e-6 * model_info["cost"]["pmo"]
|
@@ -15,9 +16,9 @@ def get_openai_token_usage(response_metadata: dict, model_info: dict):
|
|
15 |
}
|
16 |
|
17 |
|
18 |
-
def get_anthropic_token_usage(
|
19 |
-
input_tokens =
|
20 |
-
output_tokens =
|
21 |
cost = (
|
22 |
input_tokens * 1e-6 * model_info["cost"]["pmi"]
|
23 |
+ output_tokens * 1e-6 * model_info["cost"]["pmo"]
|
@@ -29,9 +30,9 @@ def get_anthropic_token_usage(response_metadata: dict, model_info: dict):
|
|
29 |
}
|
30 |
|
31 |
|
32 |
-
def get_together_token_usage(
|
33 |
-
input_tokens =
|
34 |
-
output_tokens =
|
35 |
cost = (
|
36 |
input_tokens * 1e-6 * model_info["cost"]["pmi"]
|
37 |
+ output_tokens * 1e-6 * model_info["cost"]["pmo"]
|
@@ -43,9 +44,9 @@ def get_together_token_usage(response_metadata: dict, model_info: dict):
|
|
43 |
}
|
44 |
|
45 |
|
46 |
-
def get_google_token_usage(
|
47 |
-
input_tokens =
|
48 |
-
output_tokens =
|
49 |
cost = (
|
50 |
input_tokens * 1e-6 * model_info["cost"]["pmi"]
|
51 |
+ output_tokens * 1e-6 * model_info["cost"]["pmo"]
|
@@ -57,29 +58,29 @@ def get_google_token_usage(response_metadata: dict, model_info: dict):
|
|
57 |
}
|
58 |
|
59 |
|
60 |
-
def get_token_usage(
|
61 |
match provider:
|
62 |
case "OpenAI":
|
63 |
-
return get_openai_token_usage(
|
64 |
case "Anthropic":
|
65 |
-
return get_anthropic_token_usage(
|
66 |
case "Together":
|
67 |
-
return get_together_token_usage(
|
68 |
case "Google":
|
69 |
-
return get_google_token_usage(
|
70 |
case _:
|
71 |
raise ValueError()
|
72 |
|
73 |
|
74 |
def display_api_usage(
|
75 |
-
|
76 |
):
|
77 |
with st.container(border=True):
|
78 |
if tag is None:
|
79 |
st.write("API Usage")
|
80 |
else:
|
81 |
st.write(f"API Usage ({tag})")
|
82 |
-
token_usage = get_token_usage(
|
83 |
col1, col2, col3 = st.columns(3)
|
84 |
with col1:
|
85 |
st.metric("Input Tokens", token_usage["input_tokens"])
|
@@ -87,5 +88,6 @@ def display_api_usage(
|
|
87 |
st.metric("Output Tokens", token_usage["output_tokens"])
|
88 |
with col3:
|
89 |
st.metric("Cost", f"${token_usage['cost']:.4f}")
|
90 |
-
with st.expander("
|
91 |
-
|
|
|
|
1 |
import streamlit as st
|
2 |
+
from langchain_core.messages import AIMessage
|
3 |
|
4 |
|
5 |
+
def get_openai_token_usage(aimessage: AIMessage, model_info: dict):
|
6 |
+
input_tokens = aimessage.usage_metadata["input_tokens"]
|
7 |
+
output_tokens = aimessage.usage_metadata["output_tokens"]
|
8 |
cost = (
|
9 |
input_tokens * 1e-6 * model_info["cost"]["pmi"]
|
10 |
+ output_tokens * 1e-6 * model_info["cost"]["pmo"]
|
|
|
16 |
}
|
17 |
|
18 |
|
19 |
+
def get_anthropic_token_usage(aimessage: AIMessage, model_info: dict):
|
20 |
+
input_tokens = aimessage.usage_metadata["input_tokens"]
|
21 |
+
output_tokens = aimessage.usage_metadata["output_tokens"]
|
22 |
cost = (
|
23 |
input_tokens * 1e-6 * model_info["cost"]["pmi"]
|
24 |
+ output_tokens * 1e-6 * model_info["cost"]["pmo"]
|
|
|
30 |
}
|
31 |
|
32 |
|
33 |
+
def get_together_token_usage(aimessage: AIMessage, model_info: dict):
|
34 |
+
input_tokens = aimessage.usage_metadata["input_tokens"]
|
35 |
+
output_tokens = aimessage.usage_metadata["output_tokens"]
|
36 |
cost = (
|
37 |
input_tokens * 1e-6 * model_info["cost"]["pmi"]
|
38 |
+ output_tokens * 1e-6 * model_info["cost"]["pmo"]
|
|
|
44 |
}
|
45 |
|
46 |
|
47 |
+
def get_google_token_usage(aimessage: AIMessage, model_info: dict):
|
48 |
+
input_tokens = aimessage.usage_metadata["input_tokens"]
|
49 |
+
output_tokens = aimessage.usage_metadata["output_tokens"]
|
50 |
cost = (
|
51 |
input_tokens * 1e-6 * model_info["cost"]["pmi"]
|
52 |
+ output_tokens * 1e-6 * model_info["cost"]["pmo"]
|
|
|
58 |
}
|
59 |
|
60 |
|
61 |
+
def get_token_usage(aimessage: AIMessage, model_info: dict, provider: str):
|
62 |
match provider:
|
63 |
case "OpenAI":
|
64 |
+
return get_openai_token_usage(aimessage, model_info)
|
65 |
case "Anthropic":
|
66 |
+
return get_anthropic_token_usage(aimessage, model_info)
|
67 |
case "Together":
|
68 |
+
return get_together_token_usage(aimessage, model_info)
|
69 |
case "Google":
|
70 |
+
return get_google_token_usage(aimessage, model_info)
|
71 |
case _:
|
72 |
raise ValueError()
|
73 |
|
74 |
|
75 |
def display_api_usage(
|
76 |
+
aimessage: AIMessage, model_info: dict, provider: str, tag: str | None = None
|
77 |
):
|
78 |
with st.container(border=True):
|
79 |
if tag is None:
|
80 |
st.write("API Usage")
|
81 |
else:
|
82 |
st.write(f"API Usage ({tag})")
|
83 |
+
token_usage = get_token_usage(aimessage, model_info, provider)
|
84 |
col1, col2, col3 = st.columns(3)
|
85 |
with col1:
|
86 |
st.metric("Input Tokens", token_usage["input_tokens"])
|
|
|
88 |
st.metric("Output Tokens", token_usage["output_tokens"])
|
89 |
with col3:
|
90 |
st.metric("Cost", f"${token_usage['cost']:.4f}")
|
91 |
+
with st.expander("AIMessage Metadata"):
|
92 |
+
dd = {key: val for key, val in aimessage.dict().items() if key != "content"}
|
93 |
+
st.write(dd)
|