zahidpichen committed on
Commit 5f1606d · verified · 1 Parent(s): 5e47f7c

Upload app.py

Files changed (1)
  1. app.py +76 -0
app.py ADDED
@@ -0,0 +1,76 @@
+ from Functions.write_stream import user_data
+ import streamlit as st
+ from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, ServiceContext
+ from llama_index.llms.llama_cpp import LlamaCPP
+ from llama_index.llms.llama_cpp.llama_utils import messages_to_prompt, completion_to_prompt
+ from langchain.embeddings.huggingface import HuggingFaceEmbeddings
+
+
+ # Directory holding the documents to index.
+ directory = "Knowledge Base/"
+
+ documents = SimpleDirectoryReader(directory).load_data()
+
+ llm = LlamaCPP(
+     # Pass the URL of a GGUF model to download it automatically.
+     model_url='https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF/resolve/main/mistral-7b-instruct-v0.1.Q4_K_M.gguf',
+     # Optionally, set the path to a pre-downloaded model instead of model_url.
+     model_path=None,
+     temperature=0.75,
+     max_new_tokens=256,
+     # Keep the context window a bit below the model's maximum for wiggle room.
+     context_window=3900,
+     messages_to_prompt=messages_to_prompt,
+     completion_to_prompt=completion_to_prompt,
+     verbose=True,
+ )
+ print("working -3")
+
+ # Embedding model used to vectorize the knowledge-base documents.
+ embed_model = HuggingFaceEmbeddings(model_name="thenlper/gte-large")
+
+ print("working -2")
+
+ service_context = ServiceContext.from_defaults(
+     chunk_size=256,
+     llm=llm,
+     embed_model=embed_model,
+ )
+ print("working -1")
+
+ # Build the vector index over the loaded documents.
+ index = VectorStoreIndex.from_documents(documents, service_context=service_context, show_progress=True)
+ print("working 0")
+
+ query_engine = index.as_query_engine()
+
+
+ # ============= USER INTERFACE (UI) =============
+
+ st.title("Wiki Bot")
+
+ # Initialize the chat history on first run.
+ if "messages" not in st.session_state:
+     st.session_state.messages = []
+
+ # Replay the conversation so far.
+ for message in st.session_state.messages:
+     with st.chat_message(message["role"]):
+         st.markdown(message["content"])
+
+ prompt = st.chat_input("Enter your question:")
+
+ if prompt:
+     with st.chat_message("user"):
+         st.markdown(prompt)
+     st.session_state.messages.append({"role": "user", "content": prompt})
+
+     # Query the index, then wrap the answer in a generator for streaming.
+     reply = query_engine.query(prompt)
+     response = user_data(function_name=reply)
+
+     with st.chat_message("assistant"):
+         st.write_stream(response)
+         print("working!!")
+     # Store the answer as a string so st.markdown can re-render it later.
+     st.session_state.messages.append({"role": "assistant", "content": str(reply)})