Ahmad-Moiz committed · Commit e402b2d
1 Parent(s): 68ccc71
Update app.py
app.py CHANGED
@@ -13,9 +13,10 @@ import streamlit as st
 from io import StringIO
 from llama_index import Document
 from langchain.llms import Anthropic
+from langchain import HuggingFaceHub
 from langchain.chains import RetrievalQA
 from langchain.vectorstores import FAISS
-
+from llama_index import LangchainEmbedding
 from langchain.chat_models import ChatOpenAI
 from langchain.retrievers import SVMRetriever
 from langchain.chains import QAGenerationChain
@@ -135,12 +136,13 @@ def make_llm(model_version: str):
         chosen_model = ChatOpenAI(model_name=model_version, temperature=0)
     elif model_version == "anthropic":
         chosen_model = Anthropic(temperature=0)
+    elif model_version == "flan-t5-xl":
+        chosen_model = HuggingFaceHub(repo_id="google/flan-t5-xl",model_kwargs={"temperature":0,"max_length":64})
     else:
         st.warning("`Model version not recognized. Using gpt-3.5-turbo`", icon="⚠️")
         chosen_model = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
     return chosen_model

-
 @st.cache_resource
 def make_retriever(splits, retriever_type, embedding_type, num_neighbors, _llm):
     """
@@ -333,7 +335,12 @@ def run_evaluation(chain, retriever, eval_set, grade_prompt, retriever_type, num
 # Auth
 st.sidebar.image("img/diagnostic.jpg")

+oai_api_key = st.sidebar.text_input("`OpenAI API Key:`", type="password")
+ant_api_key = st.sidebar.text_input("`(Optional) Anthropic API Key:`", type="password")
+hf_api_key = st.sidebar.text_input("`(Optional) HuggingFace API Token:`", type="password")
+
 with st.sidebar.form("user_input"):
+
     num_eval_questions = st.select_slider("`Number of eval questions`",
                                           options=[1, 5, 10, 15, 20], value=5)

@@ -352,6 +359,8 @@ with st.sidebar.form("user_input"):
                              ("gpt-3.5-turbo",
                               "gpt-4",
                               "anthropic"),
+                              # Error raised by inference API: Model google/flan-t5-xl time out
+                              #"flan-t5-xl"),
                              index=0)

     retriever_type = st.radio("`Choose retriever`",
@@ -378,12 +387,14 @@ with st.sidebar.form("user_input"):

     submitted = st.form_submit_button("Submit evaluation")

+st.sidebar.write("`By:` [@RLanceMartin](https://twitter.com/RLanceMartin)")
+
 # App
 st.header("`Auto-evaluator`")
 st.info(
-    "`I am an evaluation tool for question-answering. Given documents, I will auto-generate a question-answer eval "
+    "`I am an evaluation tool for question-answering built on LangChain. Given documents, I will auto-generate a question-answer eval "
     "set and evaluate using the selected chain settings. Experiments with different configurations are logged. "
-    "Optionally, provide your own eval set (as a JSON, see docs/karpathy-pod-eval.json for an example).`")
+    "Optionally, provide your own eval set (as a JSON, see docs/karpathy-pod-eval.json for an example). If you don't have access to GPT-4 or Anthropic, you can use our free hosted app here: https://autoevaluator.langchain.com/`")

 with st.form(key='file_inputs'):
     uploaded_file = st.file_uploader("`Please upload a file to evaluate (.txt or .pdf):` ",
@@ -396,7 +407,11 @@ with st.form(key='file_inputs'):

     submitted = st.form_submit_button("Submit files")

-if uploaded_file:
+if uploaded_file and oai_api_key:
+
+    os.environ["OPENAI_API_KEY"] = oai_api_key
+    os.environ["ANTHROPIC_API_KEY"] = ant_api_key
+    os.environ["HUGGINGFACEHUB_API_TOKEN"] = hf_api_key

     # Load docs
     text = load_docs(uploaded_file)
@@ -470,3 +485,7 @@ if uploaded_file:
                               color='expt number',
                               tooltip=['expt number', 'Retrieval score', 'Latency', 'Answer score'])
     st.altair_chart(c, use_container_width=True, theme="streamlit")
+
+else:
+
+    st.warning("Please input file and API key(s)!")
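Note on the new "flan-t5-xl" branch: the commit wires make_llm() to langchain's HuggingFaceHub wrapper but leaves the "flan-t5-xl" radio option commented out because the hosted Inference API timed out for google/flan-t5-xl. Below is a minimal sketch of exercising that same construction on its own, assuming a valid Hugging Face API token and a langchain version contemporary with this commit; the token placeholder and test prompt are illustrative, not part of the commit.

# Illustrative sketch only: mirrors the HuggingFaceHub call added to make_llm().
import os
from langchain import HuggingFaceHub

os.environ["HUGGINGFACEHUB_API_TOKEN"] = "hf_..."  # placeholder; supply a real token

# Same repo_id and model_kwargs as the commit (temperature 0, short outputs).
llm = HuggingFaceHub(repo_id="google/flan-t5-xl",
                     model_kwargs={"temperature": 0, "max_length": 64})

# LangChain LLMs are callable on a prompt string and return the generated text.
print(llm("Answer briefly: what does an auto-evaluator do?"))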
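Note on the key gating added at the bottom of the file: evaluation now runs only when a file is uploaded and an OpenAI key is supplied; the Anthropic and Hugging Face keys are optional and are simply exported to environment variables for the providers that read them. A small sketch of that pattern outside Streamlit follows; the helper name and placeholder values are hypothetical, not from the commit.

# Hypothetical helper mirroring the commit's gating / env-export logic.
import os

def export_keys(oai_api_key: str, ant_api_key: str = "", hf_api_key: str = "") -> bool:
    """Require the OpenAI key; the optional keys may be empty strings."""
    if not oai_api_key:
        return False
    os.environ["OPENAI_API_KEY"] = oai_api_key
    os.environ["ANTHROPIC_API_KEY"] = ant_api_key
    os.environ["HUGGINGFACEHUB_API_TOKEN"] = hf_api_key
    return True

if not export_keys(oai_api_key=""):  # empty key, so the guard trips
    print("Please input file and API key(s)!")  # same message the app shows via st.warning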