mobinln committed
Commit 8e0a273
1 Parent(s): 5875608

feat: version 1

Files changed (4)
  1. app.py +23 -15
  2. data/sales.csv +11 -0
  3. employees +0 -0
  4. llm.py +13 -31
app.py CHANGED
@@ -1,12 +1,12 @@
 import streamlit as st
 from llm import load_llm, response_generator
-from sql import csv_to_sqlite
+from sql import csv_to_sqlite, run_sql_query


-# repo_id = "Qwen/Qwen2.5-Coder-1.5B-Instruct-GGUF"
-repo_id = "Qwen/Qwen2.5-0.5B-Instruct-GGUF"
-# filename="qwen2.5-coder-1.5b-instruct-q8_0.gguf"
-filename = "qwen2.5-0.5b-instruct-q8_0.gguf"
+repo_id = "Qwen/Qwen2.5-Coder-1.5B-Instruct-GGUF"
+filename = "qwen2.5-coder-1.5b-instruct-q8_0.gguf"
+# repo_id = "Qwen/Qwen2.5-0.5B-Instruct-GGUF"
+# filename = "qwen2.5-0.5b-instruct-q8_0.gguf"

 llm = load_llm(repo_id, filename)

@@ -38,7 +38,12 @@ for message in st.session_state.messages:
         st.markdown(message["content"])

 # Accept user input
-if prompt := st.chat_input("What is up?"):
+if prompt := st.chat_input(
+    "What is up?",
+    disabled=(
+        "db_name" not in st.session_state or "table_name" not in st.session_state
+    ),
+):
     # Add user message to chat history
     st.session_state.messages.append({"role": "user", "content": prompt})
     # Display user message in chat message container
@@ -47,14 +52,17 @@ if prompt := st.chat_input("What is up?"):

     # Display assistant response in chat message container
     with st.chat_message("assistant"):
-        response = st.write(
-            response_generator(
-                db_name=st.session_state.db_name,
-                table_name=st.session_state.table_name,
-                llm=llm,
-                messages=st.session_state.messages,
-                question=prompt,
-            )
+        response_sql = response_generator(
+            db_name=st.session_state.db_name,
+            table_name=st.session_state.table_name,
+            llm=llm,
+            messages=st.session_state.messages,
+            question=prompt,
         )
+        response = st.markdown(response_sql)
+        result = run_sql_query(db_name=st.session_state.db_name, query=response_sql)
+        st.markdown(result)
+        st.table(result)
+
     # Add assistant response to chat history
-    st.session_state.messages.append({"role": "assistant", "content": response})
+    st.session_state.messages.append({"role": "assistant", "content": response_sql})
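Note: sql.py itself is not touched by this commit, but app.py now depends on all three of its helpers (csv_to_sqlite, run_sql_query, and the get_table_schema used by llm.py, which presumably lives in the same module). A minimal sketch of what they might look like, assuming a pandas-plus-sqlite3 implementation; this is a reconstruction, not the repo's actual sql.py:

# Hypothetical sketch of sql.py -- not the actual file from this repo.
import sqlite3

import pandas as pd


def csv_to_sqlite(csv_path, db_name, table_name):
    # Load the CSV into a DataFrame and write it to a SQLite table.
    df = pd.read_csv(csv_path)
    with sqlite3.connect(db_name) as conn:
        df.to_sql(table_name, conn, if_exists="replace", index=False)


def run_sql_query(db_name, query):
    # Execute the generated SQL and hand the rows back as a DataFrame,
    # which app.py can pass straight to st.table().
    with sqlite3.connect(db_name) as conn:
        return pd.read_sql_query(query, conn)


def get_table_schema(db_name, table_name):
    # Read column names and types back out of SQLite; PRAGMA does not
    # accept bound parameters, hence the f-string.
    with sqlite3.connect(db_name) as conn:
        return conn.execute(f"PRAGMA table_info({table_name})").fetchall()

Returning a DataFrame from run_sql_query keeps the Streamlit side simple: st.table(result) renders it directly.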
data/sales.csv ADDED
@@ -0,0 +1,11 @@
+Date,Product,Category,Quantity,Unit_Price,Total,Customer,Region
+2024-01-15,Laptop Pro X,Electronics,2,999.99,1999.98,John Smith,West
+2024-01-15,Wireless Mouse,Accessories,5,24.99,124.95,Maria Garcia,South
+2024-01-16,Desktop Monitor,Electronics,3,299.99,899.97,Robert Wilson,North
+2024-01-17,USB-C Cable,Accessories,10,12.99,129.90,Emily Chen,East
+2024-01-17,Laptop Pro X,Electronics,1,999.99,999.99,David Brown,West
+2024-01-18,Keyboard Elite,Accessories,4,89.99,359.96,Sarah Johnson,North
+2024-01-18,Webcam HD,Electronics,2,79.99,159.98,James Lee,South
+2024-01-19,Laptop Stand,Accessories,3,34.99,104.97,Lisa Anderson,East
+2024-01-19,Desktop Monitor,Electronics,2,299.99,599.98,Michael Wong,West
+2024-01-20,Wireless Mouse,Accessories,6,24.99,149.94,Rachel Martinez,North
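With this file loaded into SQLite (for example via csv_to_sqlite above), a question like "total sales per region" reduces to a single aggregate. An illustrative round trip, where the sales.db path and sales table name are assumptions:

# Illustrative usage; db/table names and the helper bodies sketched above are assumed.
from sql import csv_to_sqlite, run_sql_query

csv_to_sqlite("data/sales.csv", db_name="sales.db", table_name="sales")
result = run_sql_query(
    db_name="sales.db",
    query="SELECT Region, SUM(Total) AS total_sales FROM sales GROUP BY Region",
)
print(result)  # West, for instance, sums to 1999.98 + 999.99 + 599.98 = 3599.95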
employees DELETED
Binary file (8.19 kB)
 
llm.py CHANGED
@@ -19,7 +19,7 @@ def load_llm(repo_id, filename):
     return llm


-def generate_llm_prompt(table_name, table_schema):
+def generate_system_prompt(table_name, table_schema):
    """
    Generates a prompt to provide context about a table's schema for LLM to convert natural language to SQL.

@@ -48,45 +48,27 @@ def generate_llm_prompt(table_name, table_schema):
     return prompt


-def generate_sql_query(question, table_name, db_name):
-    pass
-    # table_name = 'movies'
-    # db_name = 'movies_db.db'
-    # table_schema = get_table_schema(db_name, table_name)
-    # llm_prompt = generate_llm_prompt(table_name, table_schema)
-    # user_prompt = """Question: {question}"""
-    # response = completion(
-    #     api_key=OPENAI_API_KEY,
-    #     model="gpt-4o-mini",
-    #     messages=[
-    #         ,
-    #         {"content": user_prompt.format(question=question), "role": "user"}],
-    #     max_tokens=1000
-    # )
-    # answer = response.choices[0].message.content
-
-    # query = answer.replace("```sql", "").replace("```", "")
-    # query = query.strip()
-    # return query
-
-
 # Streamed response emulator
 def response_generator(llm, messages, question, table_name, db_name):
     table_schema = get_table_schema(db_name, table_name)
-    llm_prompt = generate_llm_prompt(table_name, table_schema)
-    user_prompt = """Question: {question}"""
+    llm_prompt = generate_system_prompt(table_name, table_schema)
+    user_prompt = f"""Question: {question}"""

-    messages = [{"content": llm_prompt.format(table_name=table_name), "role": "system"}]
+    print(messages, llm_prompt, user_prompt)
+    history = [{"content": llm_prompt.format(table_name=table_name), "role": "system"}]

-    for val in st.session_state.messages:
-        messages.append(val)
+    for val in messages:
+        history.append(val)

-    messages.append({"role": "user", "content": user_prompt})
+    history.append({"role": "user", "content": user_prompt})

     response = llm.create_chat_completion(
-        messages, max_tokens=2048, temperature=0.7, top_p=0.95
+        messages=history,
+        max_tokens=2048,
+        temperature=0.7,
+        top_p=0.95,
     )
-    answer = response["choices"][0].message.content
+    answer = response["choices"][0]["message"]["content"]

     query = answer.replace("```sql", "").replace("```", "")
     query = query.strip()
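load_llm's body sits outside these hunks; given that app.py passes it a Hub repo_id and a GGUF filename, it most plausibly wraps llama-cpp-python's from_pretrained loader. A sketch under that assumption (the n_ctx and verbose settings are guesses, not the repo's actual values):

# Hypothetical sketch of load_llm -- the real body is outside this diff's hunks.
from llama_cpp import Llama


def load_llm(repo_id, filename):
    # Download the GGUF weights from the Hugging Face Hub and load them with llama.cpp.
    llm = Llama.from_pretrained(
        repo_id=repo_id,
        filename=filename,
        n_ctx=4096,  # assumed context window, not confirmed by this diff
        verbose=False,
    )
    return llm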