JasperV13 committed
Commit
75885cc
•
1 Parent(s): f1a5d6c

Upload 5 files

Files changed (5)
  1. Data_blog.txt +0 -0
  2. app.py +237 -0
  3. logo.png +0 -0
  4. requirements.txt +9 -0
  5. user.png +0 -0
Data_blog.txt ADDED
The diff for this file is too large to render. See raw diff
 
app.py ADDED
@@ -0,0 +1,237 @@
+ # Optional environment setup (normally handled by requirements.txt):
+ # import subprocess
+ # sub_p_res = subprocess.run(['pip', 'install', 'langchain', 'sentence-transformers', 'transformers', 'faiss-gpu', 'PyPDF2', 'gradio_client'], stdout=subprocess.PIPE).stdout.decode('utf-8')
+ # print("pip install downloaded ", sub_p_res)
+
+ # Build llama-cpp-python with CUDA (cuBLAS) support:
+ # command = 'CMAKE_ARGS="-DLLAMA_CUBLAS=on" FORCE_CMAKE=1 pip install llama-cpp-python'
+ # sub_p_res = subprocess.run(command, shell=True, check=True)
+ # print("llama-cpp-python GPU downloaded ", sub_p_res)
+
+ from gradio_client import Client
+ from langchain.document_loaders.text import TextLoader
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain.schema import Document
+ from langchain.embeddings import HuggingFaceEmbeddings
+ from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
+ from langchain.callbacks.manager import CallbackManager
+ from langchain.vectorstores import FAISS
+ from langchain.memory import ConversationBufferMemory
+ from langchain.chains import ConversationalRetrievalChain
+ from huggingface_hub import hf_hub_download
+ from langchain.llms import LlamaCpp
+
+ import time
+ import streamlit as st
+
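+ # Overview: load the blog corpus, split it into overlapping chunks, embed the
+ # chunks into a FAISS index, and answer questions over that index with a local
+ # GGUF Mistral model served by llama.cpp, inside a Streamlit chat UI.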
+ # Load the knowledge base.
+ loader = TextLoader("Data_blog.txt")
+ pages = loader.load()
+
+ def split_text(documents: list[Document]):
+     text_splitter = RecursiveCharacterTextSplitter(
+         chunk_size=1000,
+         chunk_overlap=150,
+         length_function=len,
+         add_start_index=True,
+     )
+     chunks = text_splitter.split_documents(documents)
+     print(f"Split {len(documents)} documents into {len(chunks)} chunks.")
+
+     # Debug: inspect one sample chunk, if there are enough of them.
+     if len(chunks) > 10:
+         document = chunks[10]
+         print(document.page_content)
+         print(document.metadata)
+
+     return chunks
+
+ chunks_text = split_text(pages)
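+ # The 150-character overlap lets text that straddles a chunk boundary appear
+ # intact in at least one chunk; chunk_size and chunk_overlap are worth tuning
+ # for your corpus.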
+ embedding = HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2')  # lightweight; "machi top" (Darija: not the best)
+
+ docs_text = [doc.page_content for doc in chunks_text]
+
+ VectorStore = FAISS.from_texts(docs_text, embedding=embedding)
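+ # Note: FAISS.from_texts indexes the raw strings only, so the chunk metadata
+ # (including the start_index added above) is dropped; FAISS.from_documents(
+ # chunks_text, embedding) would keep it if source attribution is ever needed.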
+ MODEL_ID = "TheBloke/Mistral-7B-OpenOrca-GGUF"
+ MODEL_BASENAME = "mistral-7b-openorca.Q4_K_M.gguf"
+
+ model_path = hf_hub_download(
+     repo_id=MODEL_ID,
+     filename=MODEL_BASENAME,
+     resume_download=True,
+ )
+
+ print("model_path : ", model_path)
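+ # hf_hub_download caches the file locally (by default under
+ # ~/.cache/huggingface/hub) and returns the path, so app restarts should not
+ # re-download the multi-gigabyte model.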
+ # Callbacks support token-wise streaming to stdout.
+ callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
+
+ CONTEXT_WINDOW_SIZE = 1500
+ MAX_NEW_TOKENS = 2000
+ N_BATCH = 512      # should be between 1 and n_ctx; consider your GPU's VRAM
+ N_GPU_LAYERS = 40  # adjust for your model and GPU VRAM pool
+
+ kwargs = {
+     "model_path": model_path,
+     "n_ctx": CONTEXT_WINDOW_SIZE,
+     "max_tokens": MAX_NEW_TOKENS,
+     "n_batch": N_BATCH,
+     "n_gpu_layers": N_GPU_LAYERS,
+     "callback_manager": callback_manager,
+     "verbose": True,  # verbose is required to pass to the callback manager
+ }
+
+ # Build the local llama.cpp model once with the settings above.
+ llm = LlamaCpp(**kwargs)
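+ # Caution: with k=5 retrieved chunks of up to 1000 characters each, plus chat
+ # history, the prompt can exceed the 1500-token n_ctx set above; a larger
+ # context window (e.g. 4096) may be needed for long conversations.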
+ memory = ConversationBufferMemory(
+     memory_key="chat_history",
+     return_messages=True,
+     input_key='question',
+     output_key='answer',
+ )
+
+ # memory.clear()
+
+ qa = ConversationalRetrievalChain.from_llm(
+     llm,
+     chain_type="stuff",
+     retriever=VectorStore.as_retriever(search_kwargs={"k": 5}),
+     memory=memory,
+     return_source_documents=True,
+     verbose=False,
+ )
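+ # chain_type="stuff" simply concatenates all retrieved chunks into a single
+ # prompt; the retriever supplies the 5 nearest chunks by embedding similarity.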
+ # Text-to-text translation via the hosted SeamlessM4T v2 Space.
+ def translate(text, source="English", target="Moroccan Arabic"):
+     client = Client("https://facebook-seamless-m4t-v2-large.hf.space/--replicas/2bmbx/")
+     result = client.predict(
+         text,
+         source,
+         target,
+         api_name="/t2tt"
+     )
+     return result
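+ # Example (hypothetical output; the pinned replica URL above may go stale, in
+ # which case point Client at the Space name instead):
+ # translate("Hello, how are you?")  # -> the Darija rendering of the greeting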
+ #---------------------------------------------------------
+ # Streamlit UI
+
+ # App title
+ st.set_page_config(page_title="🤖💼 🇲🇦 Financial advisor is Here",
+                    page_icon="🤖")
+
+ # Sidebar description
+ with st.sidebar:
+     st.title('Mokawil.AI is Here 🤖💼 🇲🇦')
+     st.markdown('🤖 An AI-powered advisor designed to assist founders (or anyone aspiring to start their own company) with various aspects of business in Morocco. This includes legal considerations, budget planning, strategies for success, and much more.')
+ # Store LLM-generated responses
+ if "messages" not in st.session_state.keys():
+     st.session_state.messages = [{"role": "assistant", "content": "How may I assist you today?"}]
+
+ # Display chat messages, with an avatar per role
+ for message in st.session_state.messages:
+     if message["role"] == "user":
+         with st.chat_message(message["role"], avatar="user.png"):
+             st.write(message["content"])
+     else:
+         with st.chat_message(message["role"], avatar="logo.png"):
+             st.write(message["content"])
+ def clear_chat_history():
+     # Rebind the module-level chain so the reset actually takes effect.
+     global qa
+     memory.clear()
+     qa = ConversationalRetrievalChain.from_llm(
+         llm,
+         chain_type="stuff",
+         retriever=VectorStore.as_retriever(search_kwargs={"k": 5}),
+         memory=memory,
+         return_source_documents=True,
+         verbose=False,
+     )
+     st.session_state.messages = [{"role": "assistant", "content": "How may I assist you today?"}]
+
+ st.sidebar.button('Clear Chat History', on_click=clear_chat_history)
+ selected_language = st.sidebar.selectbox("Select Language", ["English", "Darija"], index=0)  # English is the default
+ # Generate a response from the retrieval chain
+ def generate_llm_response(prompt_input):
+     res = qa(prompt_input)
+
+     if selected_language == "Darija":
+         return translate(res['answer'])
+     else:
+         return res['answer']
+ # User-provided prompt
+ if prompt := st.chat_input("What is up?"):
+     st.session_state.messages.append({"role": "user", "content": prompt})
+     with st.chat_message("user", avatar="user.png"):
+         st.write(prompt)
+
+ # Generate a new response if the last message is not from the assistant
+ if st.session_state.messages[-1]["role"] != "assistant":
+     with st.chat_message("assistant", avatar="logo.png"):
+         with st.spinner("Thinking..."):
+             response = generate_llm_response(st.session_state.messages[-1]["content"])
+             placeholder = st.empty()
+             full_response = ''
+             # Reveal the answer character by character to simulate streaming.
+             for item in response:
+                 full_response += item
+                 placeholder.markdown(full_response)
+                 time.sleep(0.05)
+             placeholder.markdown(full_response)
+     message = {"role": "assistant", "content": full_response}
+     st.session_state.messages.append(message)
+ # Example prompts in the sidebar
+ with st.sidebar:
+     st.title('Examples:')
+
+ def promptExample1():
+     prompt = "How can I start my company in Morocco?"
+     st.session_state.messages.append({"role": "user", "content": prompt})
+
+ def promptExample2():
+     prompt = "What are some recommended cities for starting a business in the finance sector?"
+     st.session_state.messages.append({"role": "user", "content": prompt})
+
+ def promptExample3():
+     prompt = "What is the estimated amount of money I need to start my company?"
+     st.session_state.messages.append({"role": "user", "content": prompt})
+
+ st.sidebar.button('How can I start my company in Morocco?', on_click=promptExample1)
+ st.sidebar.button('What are some recommended cities for starting a business in the finance sector?', on_click=promptExample2)
+ st.sidebar.button('What is the estimated amount of money I need to start my company?', on_click=promptExample3)
+
+ with st.sidebar:
+     st.title('Disclaimer ⚠️:')
+     st.markdown('May produce inaccurate information.')
+     st.markdown('Consult a professional advisor for specific problems.')
logo.png ADDED
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ langchain
+ sentence-transformers
+ transformers
+ faiss-gpu
+ PyPDF2
+ torch
+ llama-cpp-python
+ gradio_client
+ streamlit  # imported by app.py
user.png ADDED