import streamlit as st
import streamlit.components.v1 as components
import openai
import subprocess
import re
import os
import pandas as pd
import socket
import time
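# Chat-style data-analysis app: a ReAct-style agent (Thought / Action / Observation)
# generates Streamlit dashboard code for the loaded CSV and launches it as a
# separate Streamlit process on a free port.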
# csv_name = "./Financial_Sample.csv"
csv_name = "./global_superstore_2016.csv"

try:
    df = pd.read_csv(csv_name)
    metadata = {
        "columns": df.columns.tolist(),
        "dtypes": df.dtypes.apply(lambda x: x.name).to_dict(),
        "shape": df.shape,
    }
except Exception as e:
    st.error(f"Error loading CSV file: {e}")
    st.stop()
def is_port_in_use(port: int) -> bool:
    """Return True if something is already listening on the given localhost port."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0


def find_free_port(starting_port: int) -> int:
    """Scan upward from starting_port and return the first port that is free."""
    port = starting_port
    while is_port_in_use(port):
        port += 1
    return port
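# Example: if 8501 is already taken by this app itself, find_free_port(8501)
# returns the next open port (e.g. 8502) for the generated dashboard.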
def runcode(code: str) -> tuple:
    """Write the generated code to code.py and launch it as a separate Streamlit app.

    Returns (port, message); port is None if the process failed to start.
    """
    with open("code.py", "w") as file:
        file.write(code)
    starting_port = 8501
    free_port = find_free_port(starting_port)
    try:
        process = subprocess.Popen(
            ["streamlit", "run", "code.py", "--server.port", str(free_port)],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
        )
        # Give the child process a few seconds to either bind the port or crash.
        time.sleep(5)
        if process.poll() is None:
            return (
                free_port,
                f"Streamlit process started successfully on port {free_port}",
            )
        else:
            stdout, stderr = process.communicate()
            return (None, f"Streamlit process exited unexpectedly. Error: {stderr}")
    except FileNotFoundError:
        return (None, "Error: Streamlit is not installed or not in PATH")
    except Exception as e:
        return (None, f"An unexpected error occurred: {str(e)}")
class Agent:
    """Minimal ReAct-style agent: keeps a chat history and loops
    Thought -> Action -> PAUSE -> Observation until a final Answer."""

    def __init__(self, system_prompt="", known_actions=None):
        self.system = system_prompt
        self.messages = []
        self.known_actions = known_actions if known_actions is not None else {}
        self.client = openai.OpenAI(
            api_key=os.environ.get("TOGETHER_API_KEY"),
            base_url="https://api.together.xyz/v1",
        )
        self.messages.append({"role": "system", "content": self.system})

    def __call__(self, message):
        self.messages.append({"role": "user", "content": message})
        result = self.execute()
        self.messages.append({"role": "assistant", "content": result})
        return result

    def execute(self):
        try:
            # Using the 405B model for better performance
            response = self.client.chat.completions.create(
                model="meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
                stop=["PAUSE"],
                messages=self.messages,
            )
            return response.choices[0].message.content
        except Exception as e:
            return f"Error executing model: {str(e)}"
    def query(self, question, max_turns=5):
        i = 0
        next_prompt = question
        while i < max_turns:
            i += 1
            result = self(next_prompt)
            st.session_state.logs += f"Assistant: {result}\n"
            print(f"Assistant: {result}\n")
            # Match "Action: <name>: <input>" whether the input starts on the same
            # line or on the following lines (re.DOTALL lets the input span lines).
            action_re = re.search(r"Action:\s*(\w+):\s*(.*)", result, re.DOTALL)
            if action_re:
                action = action_re.group(1)
                action_input = action_re.group(2).strip()
                st.session_state.logs += (
                    f"Action: {action}\nAction Input: {action_input}\n"
                )
                print(f"Action: {action}\nAction Input: {action_input}\n")
                if action not in self.known_actions:
                    error_msg = f"Unknown action: {action}: {action_input}"
                    raise Exception(error_msg)
                st.session_state.logs += (
                    f" ------------------------\n running {action} {action_input}\n"
                )
                print(f" ------------------------\n running {action} {action_input}\n")
                observation = self.known_actions[action](action_input)
                st.session_state.logs += f"Observation: {observation}\n"
                print(f"Observation: {observation}\n")
                next_prompt = f"Observation: {observation}"
            else:
                print("No action found, ending query")
                return
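# Illustrative flow (mirrors how the app below uses the class):
#   bot = Agent(system_prompt=prompt, known_actions={"runcode": runcode})
#   bot.query("Show total sales by region")
# The model alternates Thought/Action/Observation turns; each "runcode" action
# launches the generated dashboard in a separate Streamlit process.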
known_actions = {"runcode": runcode}
prompt = f"""
You are an expert in creating advanced Interactive Streamlit Dashboards in python based on user Query and you run in a loop of Thought, Action, PAUSE, Observation.
At the end of the loop you output an Answer
Use Thought to describe your thoughts about the question you have been asked.
Use Action to run one of the actions available to you - then return PAUSE.
Observation will be the result of running those actions.
Always return just code no need of ```
Your Task is help user get result of query about below dataset,Decide based on user query to make Dashboard or Just Textual Answer.
Here is the metadata of the dataset and name of dataset is {csv_name}:
use plotly preferably.
Columns: {metadata['columns']}
Dtypes: {metadata['dtypes']}
Shape: {metadata['shape']}
You can use this metadata to generate results.
Your available actions are:
runcode
How to use actions
Action : action_name: input_to_action
if input_to_action is code then don't use ``` just write code.
Always Follow Action : action_name: input_to_action
Example session:
Question: Give me a dashboard to visualize the people height and age
Thought: I need to run a create a dashboard where i can visualize the Subscriptions and Country data
Action: runcode: import streamlit as st
import pandas as pd
import plotly.express as px
from datetime import datetime
def load_data():
df = pd.read_csv(f{csv_name})
return df
df = load_data()
st.title("Height vs Age Visualization")
fig = px.scatter(df, x='age', y='height', opacity=0.6)
fig.update_layout(
xaxis_title="Age (years)",
yaxis_title="Height (cm)",
title="Height vs Age Scatter Plot"
)
st.plotly_chart(fig, use_container_width=True)
PAUSE
Observation : understand the output based its stdout and take necessary steps.
Answer: Final Answer for User Request if its Dashboard send "Please visit link to view dashboard" or Textual Answer "Your Interpretation of Answer"
""".strip()
if "bot" not in st.session_state:
st.session_state.bot = Agent(system_prompt=prompt, known_actions=known_actions)
st.set_page_config(layout="wide")
st.title("Customer Data Analysis")
if "logs" not in st.session_state:
st.session_state.logs = ""
if "messages" not in st.session_state:
st.session_state.messages = []
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
if query := st.chat_input("Enter your query about the dataset"):
    st.session_state.messages.append({"role": "user", "content": query})
    with st.chat_message("user"):
        st.markdown(query)
    st.session_state.logs = ""
    with st.spinner("Generating response..."):
        st.session_state.bot.query(query)
    answer_match = re.search(r"Answer:", st.session_state.logs)
    if answer_match:
        answer_end_index = answer_match.end()
        subsequent_text = st.session_state.logs[answer_end_index:].strip()
    else:
        st.warning("No answer found in the generated response.")
        st.stop()  # stop the rerun here; subsequent_text is only defined when an Answer exists
    with st.chat_message("assistant"):
        url_pattern = r"(https?://[^\s]+)"
        url = re.findall(url_pattern, subsequent_text)
        if url:
            # Embed the generated dashboard and show the accompanying text.
            components.iframe(src=url[0], width=800, height=600)
            st.write(subsequent_text)
        else:
            st.success(subsequent_text)
    st.session_state.messages.append({"role": "assistant", "content": subsequent_text})
with st.sidebar:
    with st.expander("Logs"):
        st.code(st.session_state.logs)
    st.title("Dataset Metadata")
    with st.expander("Metadata"):
        st.write("Columns:", metadata["columns"])
        st.write("Dtypes:", metadata["dtypes"])
        st.write("Shape:", metadata["shape"])
        st.write("Sample Data:")
        st.write(df.head())
# TODO - Add Clear Button