Force manual API-key entry if no environment variable is provided
Browse files- app.py +7 -2
- pedalo/agents/code_interpreter.py +3 -2
- pedalo/main.py +2 -2
app.py
CHANGED
@@ -4,19 +4,24 @@ from pandas import DataFrame
|
|
4 |
|
5 |
from pedalo.main import run
|
6 |
import pandas as pd
|
|
|
|
|
7 |
|
8 |
# st.set_page_config(layout="wide")
|
9 |
st.title("PEDALO - Productive Exploratory Data Analysis using Langchain interrOgation")
|
10 |
st.write("Ask your data what you wanna know!")
|
|
|
|
|
|
|
|
|
11 |
model = st.sidebar.radio("Which model do you wanna use?", ("gpt-4", "gpt-3.5-turbo"), index=1)
|
12 |
-
|
13 |
uploaded_file = st.sidebar.file_uploader("Choose a file", type=["csv"])
|
14 |
|
15 |
|
16 |
|
17 |
def run_df_analysis(prompt:str, df: DataFrame):
|
18 |
st_callback = StreamlitCallbackHandler(st.container())
|
19 |
-
response = run(prompt, df, st_callback, model)
|
20 |
st.write(response)
|
21 |
|
22 |
|
|
|
4 |
|
5 |
from pedalo.main import run
|
6 |
import pandas as pd
|
7 |
+
import os
|
8 |
+
import openai
|
9 |
|
10 |
# st.set_page_config(layout="wide")
|
11 |
st.title("PEDALO - Productive Exploratory Data Analysis using Langchain interrOgation")
|
12 |
st.write("Ask your data what you wanna know!")
|
13 |
+
# Resolve the OpenAI API key: prefer the environment variable, otherwise
# fall back to manual entry in the sidebar.
# Single os.environ.get() call instead of an `in` check followed by a
# second lookup; also treats an *empty* env var as "not provided".
openai_api_key = os.environ.get("OPENAI_API_KEY")
if not openai_api_key:
    # `type="password"` masks the secret so it is never echoed in the UI.
    openai_api_key = st.sidebar.text_input("OPENAI_API_KEY", type="password")
|
17 |
model = st.sidebar.radio("Which model do you wanna use?", ("gpt-4", "gpt-3.5-turbo"), index=1)
|
|
|
18 |
uploaded_file = st.sidebar.file_uploader("Choose a file", type=["csv"])
|
19 |
|
20 |
|
21 |
|
22 |
def run_df_analysis(prompt: str, df: DataFrame):
    """Run the LangChain agent over *df* for *prompt* and render the answer.

    Intermediate agent steps are streamed into a Streamlit container via a
    callback handler; the final response is written to the page.
    """
    container = st.container()
    handler = StreamlitCallbackHandler(container)
    answer = run(prompt, df, handler, openai_api_key, model)
    st.write(answer)
|
26 |
|
27 |
|
pedalo/agents/code_interpreter.py
CHANGED
@@ -12,7 +12,7 @@ model_grand_agent = "gpt-3.5-turbo"
|
|
12 |
|
13 |
|
14 |
def run(
|
15 |
-
prompt: str, df: DataFrame, st_callback: StreamlitCallbackHandler, model="gpt-4"
|
16 |
) -> str:
|
17 |
agent_executor_kwargs = {
|
18 |
"handle_parsing_errors": True,
|
@@ -22,6 +22,7 @@ def run(
|
|
22 |
temperature=0,
|
23 |
model=model_pandas_agent,
|
24 |
streaming=True,
|
|
|
25 |
),
|
26 |
df=df,
|
27 |
verbose=True,
|
@@ -47,7 +48,7 @@ def run(
|
|
47 |
takes as an input the entire question and returns the answer after running calculations""",
|
48 |
),
|
49 |
],
|
50 |
-
llm=ChatOpenAI(temperature=0, model=model_grand_agent, streaming=True),
|
51 |
agent_type=AgentType.OPENAI_FUNCTIONS,
|
52 |
# agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
|
53 |
verbose=True,
|
|
|
12 |
|
13 |
|
14 |
def run(
|
15 |
+
prompt: str, df: DataFrame, st_callback: StreamlitCallbackHandler, openai_api_key:str, model="gpt-4"
|
16 |
) -> str:
|
17 |
agent_executor_kwargs = {
|
18 |
"handle_parsing_errors": True,
|
|
|
22 |
temperature=0,
|
23 |
model=model_pandas_agent,
|
24 |
streaming=True,
|
25 |
+
openai_api_key=openai_api_key
|
26 |
),
|
27 |
df=df,
|
28 |
verbose=True,
|
|
|
48 |
takes as an input the entire question and returns the answer after running calculations""",
|
49 |
),
|
50 |
],
|
51 |
+
llm=ChatOpenAI(temperature=0, model=model_grand_agent, streaming=True, openai_api_key=openai_api_key),
|
52 |
agent_type=AgentType.OPENAI_FUNCTIONS,
|
53 |
# agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
|
54 |
verbose=True,
|
pedalo/main.py
CHANGED
@@ -13,9 +13,9 @@ openai.api_key = api_key
|
|
13 |
|
14 |
|
15 |
def run(
|
16 |
-
prompt: str, df: DataFrame, st_callback: StreamlitCallbackHandler, model="gpt-4"
|
17 |
):
|
18 |
-
result = code_interpreter.run(prompt, df, st_callback, model)
|
19 |
return result
|
20 |
|
21 |
|
|
|
13 |
|
14 |
|
15 |
def run(
|
16 |
+
prompt: str, df: DataFrame, st_callback: StreamlitCallbackHandler, openai_api_key: str, model="gpt-4"
|
17 |
):
|
18 |
+
result = code_interpreter.run(prompt, df, st_callback, openai_api_key, model)
|
19 |
return result
|
20 |
|
21 |
|