Spaces:
Sleeping
Sleeping
Custom prompts and gpt-4 model
Browse files
app.py
CHANGED
@@ -1,5 +1,5 @@
|
|
1 |
import streamlit as st
|
2 |
-
from index import build_index, build_service_context, load_documents
|
3 |
|
4 |
st.title("SAIRA: Student Affairs AI Response Assistant")
|
5 |
st.caption('Welcome to the SAIRA chatbot! This bot has knowledge about Innopolis University. Feel free to write your request!')
|
@@ -8,11 +8,11 @@ st.caption('Welcome to the SAIRA chatbot! This bot have knowledge about Innopoli
|
|
8 |
def load_docs_and_build_index():
|
9 |
service_context = build_service_context()
|
10 |
docs = load_documents()
|
11 |
-
|
|
|
|
|
|
|
12 |
|
13 |
-
index = load_docs_and_build_index()
|
14 |
-
|
15 |
-
query_engine = index.as_query_engine(streaming=True)
|
16 |
|
17 |
# Initialize chat history
|
18 |
if "messages" not in st.session_state:
|
|
|
1 |
import streamlit as st
|
2 |
+
from index import build_index, build_service_context, change_prompts, load_documents
|
3 |
|
4 |
st.title("SAIRA: Student Affairs AI Response Assistant")
|
5 |
st.caption('Welcome to the SAIRA chatbot! This bot has knowledge about Innopolis University. Feel free to write your request!')
|
|
|
8 |
def load_docs_and_build_index():
|
9 |
service_context = build_service_context()
|
10 |
docs = load_documents()
|
11 |
+
index = build_index(docs, service_context)
|
12 |
+
query_engine = index.as_query_engine(streaming=True)
|
13 |
+
change_prompts(query_engine)
|
14 |
+
return query_engine
|
15 |
|
|
|
|
|
|
|
16 |
|
17 |
# Initialize chat history
|
18 |
if "messages" not in st.session_state:
|
index.py
CHANGED
@@ -5,6 +5,7 @@ from llama_index import (
|
|
5 |
StorageContext,
|
6 |
load_index_from_storage
|
7 |
)
|
|
|
8 |
from llama_index.vector_stores import SimpleVectorStore
|
9 |
from llama_index.llms import Ollama, OpenAI
|
10 |
import os
|
@@ -20,7 +21,7 @@ def load_documents():
|
|
20 |
|
21 |
def build_service_context():
|
22 |
# llm = Ollama(model='mistral')
|
23 |
-
llm = OpenAI(model="gpt-3.5-turbo")  # NOTE(review): line truncated in extraction; old model name presumed from commit title "gpt-4 model" — confirm against repo history
|
24 |
return ServiceContext.from_defaults(llm=llm, embed_model="local:BAAI/bge-large-en-v1.5")
|
25 |
|
26 |
def build_index(documents, service_context):
|
@@ -52,3 +53,29 @@ def build_index(documents, service_context):
|
|
52 |
# storage_context.persist(persist_dir=persist_dir)
|
53 |
index.storage_context.persist(persist_dir=persist_dir)
|
54 |
return index
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
5 |
StorageContext,
|
6 |
load_index_from_storage
|
7 |
)
|
8 |
+
from llama_index.prompts import ChatPromptTemplate, ChatMessage, MessageRole
|
9 |
from llama_index.vector_stores import SimpleVectorStore
|
10 |
from llama_index.llms import Ollama, OpenAI
|
11 |
import os
|
|
|
21 |
|
22 |
def build_service_context():
|
23 |
# llm = Ollama(model='mistral')
|
24 |
+
llm = OpenAI(model="gpt-4-1106-preview")
|
25 |
return ServiceContext.from_defaults(llm=llm, embed_model="local:BAAI/bge-large-en-v1.5")
|
26 |
|
27 |
def build_index(documents, service_context):
|
|
|
53 |
# storage_context.persist(persist_dir=persist_dir)
|
54 |
index.storage_context.persist(persist_dir=persist_dir)
|
55 |
return index
|
56 |
+
|
57 |
+
def change_prompts(query_engine):
|
58 |
+
message_templates = [
|
59 |
+
ChatMessage(content='''You are SAIRA (Student Affairs AI Response Assistant) - expert Q&A system for the students of Innopolis University.
|
60 |
+
Always answer the query using the provided context information, and not prior knowledge.
|
61 |
+
Never directly reference the given context in your answer. Never use file names or any other meta information in the answer.
|
62 |
+
If you mention person or department, provide also their Telegram or E-mail.
|
63 |
+
If you mention some Telegram chat, give the link to it''', role=MessageRole.SYSTEM),
|
64 |
+
ChatMessage(content='''Context information:
|
65 |
+
{context_str}
|
66 |
+
---------------------
|
67 |
+
Given the context information and not prior knowledge, answer the query.
|
68 |
+
|
69 |
+
Query: {query_str}
|
70 |
+
|
71 |
+
Avoid statements like 'based on the context' or 'the context information', 'in the context' or anything along those lines.
|
72 |
+
Never use word 'context' in the answer!
|
73 |
+
If you can't write an answer, or it is not provided in context, just write '<SPECIALIST>' as an answer, and the request will be transferred to the specialist.
|
74 |
+
Write '<SPECIALIST>' instead of asking to contact Student Affairs.''', role=MessageRole.USER),
|
75 |
+
]
|
76 |
+
qa_prompt_tmpl = ChatPromptTemplate(message_templates)
|
77 |
+
|
78 |
+
query_engine.update_prompts(
|
79 |
+
{"response_synthesizer:text_qa_template": qa_prompt_tmpl}
|
80 |
+
)
|
81 |
+
|