Spaces:
Runtime error
Runtime error
shivanis14
commited on
Upload 7 files
Browse files- app.py +90 -0
- appDeepseekCoder.py +92 -0
- chainlit.md +3 -0
- prompts.py +24 -0
- public/test.css +14 -0
- public/test.js +0 -0
- requirements.txt +1 -0
app.py
ADDED
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#Reference : https://medium.com/@tahreemrasul/building-a-chatbot-application-with-chainlit-and-langchain-3e86da0099a6
|
2 |
+
from langchain_openai import ChatOpenAI
|
3 |
+
from langchain.chains import LLMChain
|
4 |
+
from prompts import maths_assistant_prompt_template
|
5 |
+
from langchain.memory.buffer import ConversationBufferMemory
|
6 |
+
from dotenv import load_dotenv
|
7 |
+
|
8 |
+
import os
|
9 |
+
import chainlit as cl
|
10 |
+
|
11 |
+
# Load environment variables from .env file
|
12 |
+
load_dotenv()
|
13 |
+
|
14 |
+
|
15 |
+
@cl.on_chat_start
async def start_llm():
    """Initialize the quiz chain for a new chat session.

    Builds a gpt-4o-mini LLMChain with conversation memory, stores it in
    the user session, and sends the topic-selection buttons.
    """
    llm = ChatOpenAI(model='gpt-4o-mini', temperature=0.5)
    # BUG FIX: ConversationBufferMemory has no `max_len` field — the original
    # passed max_len=50, which is not a recognized parameter and truncated
    # nothing. The buffer keeps the full history.
    conversation_memory = ConversationBufferMemory(memory_key="chat_history",
                                                   return_messages=True)
    llm_chain = LLMChain(llm=llm,
                         prompt=maths_assistant_prompt_template,
                         memory=conversation_memory)
    cl.user_session.set("llm_chain", llm_chain)

    # Send the initial message with one selectable button per quiz topic.
    topics = ["Probability", "Linear Algebra", "Accounts", "Calculus"]
    actions = [
        cl.Action(name=topic, value=topic, description="Select Quiz Topic!")
        for topic in topics
    ]
    await cl.Message(content="**Pick a Topic and Let the Quiz Adventure Begin!** ππ",
                     actions=actions).send()
|
36 |
+
|
37 |
+
|
38 |
+
|
39 |
+
@cl.on_message
async def query_llm(message: cl.Message):
    """Forward a user message to the LLM chain and stream the reply back.

    The chain's ConversationBufferMemory supplies {chat_history} itself
    during prep_inputs, so only the new question needs to be passed in.
    """
    llm_chain = cl.user_session.get("llm_chain")
    # BUG FIX: Chain.ainvoke takes callbacks inside the RunnableConfig dict;
    # a bare `callbacks=` keyword is silently ignored, so the Chainlit
    # callback handler was never attached to the run.
    response = await llm_chain.ainvoke(
        {"question": message.content},
        config={"callbacks": [cl.AsyncLangchainCallbackHandler()]},
    )
    await cl.Message(response["text"]).send()
|
56 |
+
|
57 |
+
|
58 |
+
# NOTE(review): this helper is not referenced anywhere in this file —
# likely dead code or meant to be called after topic selection; confirm.
async def send_good_luck_message():
    # `align` is not a documented cl.Message argument in recent Chainlit —
    # TODO confirm the pinned version supports it.
    await cl.Message(content="Good luck! π", align="bottom").send()
|
60 |
+
|
61 |
+
async def handle_topic_selection(action: cl.Action):
    """Kick off a quiz on the topic named by the clicked action button."""
    llm_chain = cl.user_session.get("llm_chain")
    # BUG FIX: callback handlers must travel inside the RunnableConfig; a
    # bare `callbacks=` keyword to ainvoke is dropped. The chain's memory
    # fills {chat_history}, so only the question is supplied here.
    response = await llm_chain.ainvoke(
        {"question": f"Quiz me on the topic {action.value}."},
        config={"callbacks": [cl.AsyncLangchainCallbackHandler()]},
    )
    await cl.Message(response["text"]).send()
|
72 |
+
|
73 |
+
# One registered callback per topic button, all delegating to
# handle_topic_selection. The original defined four functions that were all
# named `on_action`, each rebinding (shadowing) the previous one — it only
# worked because the decorator registers the function before the name is
# rebound. Distinct names make the registration explicit and debuggable.
@cl.action_callback("Linear Algebra")
async def on_linear_algebra(action: cl.Action):
    await handle_topic_selection(action)


@cl.action_callback("Probability")
async def on_probability(action: cl.Action):
    await handle_topic_selection(action)


@cl.action_callback("Accounts")
async def on_accounts(action: cl.Action):
    await handle_topic_selection(action)


@cl.action_callback("Calculus")
async def on_calculus(action: cl.Action):
    await handle_topic_selection(action)
|
88 |
+
|
89 |
+
|
90 |
+
|
appDeepseekCoder.py
ADDED
@@ -0,0 +1,92 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#Reference : https://medium.com/@tahreemrasul/building-a-chatbot-application-with-chainlit-and-langchain-3e86da0099a6
#Reference : https://platform.deepseek.com/api-docs/api/create-chat-completion
from langchain.chains import LLMChain
from prompts import maths_assistant_prompt_template
from langchain.memory.buffer import ConversationBufferMemory
from transformers import AutoTokenizer, AutoModelForCausalLM, GenerationConfig
import torch
import chainlit as cl
#from chainlit import Button # Import Button component

# Load the model and tokenizer at import time. NOTE(review): this downloads
# the 7B checkpoint on first run and needs substantial (GPU) memory —
# presumably the cause of the Space's runtime error; confirm the hardware.
model_name = "deepseek-ai/deepseek-math-7b-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16, device_map="auto")
model.generation_config = GenerationConfig.from_pretrained(model_name)
# Use EOS as the pad token since this model defines no pad token of its own.
model.generation_config.pad_token_id = model.generation_config.eos_token_id
|
16 |
+
|
17 |
+
|
18 |
+
@cl.on_chat_start
async def start_llm():
    """Build an LLM chain around the local DeepSeek model and greet the user.

    BUG FIX: the original passed the raw `transformers` model object as
    `llm=` to LLMChain, which requires a LangChain LLM/Runnable and fails at
    runtime. Wrap the model + tokenizer in a text-generation pipeline and a
    HuggingFacePipeline adapter instead.
    """
    # Local imports: both packages are already dependencies of this file.
    from transformers import pipeline
    from langchain.llms import HuggingFacePipeline

    hf_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer,
                           max_new_tokens=512)
    llm = HuggingFacePipeline(pipeline=hf_pipeline)

    # BUG FIX: ConversationBufferMemory has no `max_len` field — the original
    # max_len=50 was not a recognized parameter and truncated nothing.
    conversation_memory = ConversationBufferMemory(memory_key="chat_history",
                                                   return_messages=True)
    llm_chain = LLMChain(llm=llm,
                         prompt=maths_assistant_prompt_template,
                         memory=conversation_memory)
    cl.user_session.set("llm_chain", llm_chain)

    # Send the initial message with one selectable button per quiz topic.
    topics = ["Probability", "Linear Algebra", "Accounts", "Calculus"]
    actions = [
        cl.Action(name=topic, value=topic, description="Select Quiz Topic!")
        for topic in topics
    ]
    await cl.Message(content="**Pick a Topic and Let the Quiz Adventure Begin!** ππ",
                     actions=actions).send()
|
38 |
+
|
39 |
+
|
40 |
+
|
41 |
+
@cl.on_message
async def query_llm(message: cl.Message):
    """Forward a user message to the LLM chain and send the reply back.

    The chain's ConversationBufferMemory supplies {chat_history} itself
    during prep_inputs, so only the new question needs to be passed in.
    """
    llm_chain = cl.user_session.get("llm_chain")
    # BUG FIX: Chain.ainvoke takes callbacks inside the RunnableConfig dict;
    # a bare `callbacks=` keyword is silently ignored, so the Chainlit
    # callback handler was never attached to the run.
    response = await llm_chain.ainvoke(
        {"question": message.content},
        config={"callbacks": [cl.AsyncLangchainCallbackHandler()]},
    )
    await cl.Message(response["text"]).send()
|
58 |
+
|
59 |
+
|
60 |
+
# NOTE(review): this helper is not referenced anywhere in this file —
# likely dead code or meant to be called after topic selection; confirm.
async def send_good_luck_message():
    # `align` is not a documented cl.Message argument in recent Chainlit —
    # TODO confirm the pinned version supports it.
    await cl.Message(content="Good luck! π", align="bottom").send()
|
62 |
+
|
63 |
+
async def handle_topic_selection(action: cl.Action):
    """Kick off a quiz on the topic named by the clicked action button."""
    llm_chain = cl.user_session.get("llm_chain")
    # BUG FIX: callback handlers must travel inside the RunnableConfig; a
    # bare `callbacks=` keyword to ainvoke is dropped. The chain's memory
    # fills {chat_history}, so only the question is supplied here.
    response = await llm_chain.ainvoke(
        {"question": f"Quiz me on the topic {action.value}."},
        config={"callbacks": [cl.AsyncLangchainCallbackHandler()]},
    )
    await cl.Message(response["text"]).send()
|
74 |
+
|
75 |
+
# One registered callback per topic button, all delegating to
# handle_topic_selection. The original defined four functions that were all
# named `on_action`, each rebinding (shadowing) the previous one — it only
# worked because the decorator registers the function before the name is
# rebound. Distinct names make the registration explicit and debuggable.
@cl.action_callback("Linear Algebra")
async def on_linear_algebra(action: cl.Action):
    await handle_topic_selection(action)


@cl.action_callback("Probability")
async def on_probability(action: cl.Action):
    await handle_topic_selection(action)


@cl.action_callback("Accounts")
async def on_accounts(action: cl.Action):
    await handle_topic_selection(action)


@cl.action_callback("Calculus")
async def on_calculus(action: cl.Action):
    await handle_topic_selection(action)
|
90 |
+
|
91 |
+
|
92 |
+
|
chainlit.md
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
# π¨ Welcome to Mathy! π¦
|
2 |
+
|
3 |
+
Hi there! 👋 I am Mathy and I am built to quiz you on mathematics-related topics. You can begin by entering the topic of your interest.
|
prompts.py
ADDED
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from langchain.prompts import PromptTemplate

# Prompt for the maths quiz tutor. The LLMChain fills {chat_history} from
# its ConversationBufferMemory and {question} from the incoming message.
maths_assistant_template = """
You are a math tutor. Your task is to ask the student multiple choice questions related to their topic of interest. The student will select their answer by choosing A, B, C, or D (case insensitive, so a/b/c/d are also acceptable).

Here are the guidelines:

Always say "Input your answer as A, B, C, or D" at the end of every question.
Correct Answers: If the student picks the correct answer choice A or B or C or D or a or b or c or d, promptly move on to the next question.
Incorrect Answers: If the student's answer is incorrect, avoid giving the answer directly. Instead, encourage the student to think critically about the question. Offer hints or guidance to help them arrive at the correct answer.
Difficulty: If the student finds the question too difficult, you can provide a hint or ask if they would like to skip the question.
Answer Requests: If the student asks for the answer, provide a detailed explanation of the correct answer.
Non-Math Questions: If the student asks a question not related to mathematics, politely remind them that you are a math tutor and request that they ask a math-related question. Do not confuse these questions with the multiple-choice answer options A, B, C, or D.
Changing quiz topic : Always allow students to change the topic in the middle of the quiz.

Chat History: {chat_history}
Question: {question}
Answer:"""

maths_assistant_prompt_template = PromptTemplate(
    input_variables=["chat_history", "question"],
    template=maths_assistant_template
)
|
public/test.css
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
/* Hide the default "Built with Chainlit" watermark link text. */
a[href*='https://github.com/Chainlit/chainlit'] {
  visibility: collapse; /* Hides the original text */
  position: relative;
}

/* Replace the hidden link text with a hint about switching quiz topics.
   Typo fix: "appropiate" -> "appropriate". */
a[href*='https://github.com/Chainlit/chainlit']::after {
  content: "If you want to change the quiz topic, scroll up and hit the appropriate button!"; /* New text to display */
  visibility: visible;
  color: white; /* Color of the new text */
  width: 100%;
  height: 100%;
  font-size: 13px;
  cursor: pointer;
}
|
public/test.js
ADDED
File without changes
|
requirements.txt
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
chainlit
langchain
langchain-openai
python-dotenv
torch
transformers
|