rishisim committed
Commit
044b65e
1 Parent(s): 762e024

updated app.py using huggingface space

Files changed (1)
  1. app.py +152 -98
app.py CHANGED
@@ -1,27 +1,24 @@
 import gradio as gr
-import json
+
+import langchain, langchain_huggingface
+
 from langchain.llms import GooglePalm
+from langchain.document_loaders.csv_loader import CSVLoader
+from langchain_huggingface import HuggingFaceEmbeddings
+from langchain.vectorstores import FAISS
 
 api_key = "AIzaSyCdM_aAIsW_nPbjarOF83mbX1_z1cVX2_M"
 
 llm = GooglePalm(google_api_key = api_key, temperature=0.7)
 
-from langchain.document_loaders.csv_loader import CSVLoader
 
 loader = CSVLoader(file_path='aiotsmartlabs_faq.csv', source_column = 'prompt')
 data = loader.load()
 
-from langchain_huggingface import HuggingFaceEmbeddings
-from langchain.vectorstores import FAISS
 
-# instructor_embeddings = HuggingFaceEmbeddings(model_name = "Alibaba-NLP/gte-Qwen2-7B-instruct") # best model <-- but too big
 instructor_embeddings = HuggingFaceEmbeddings(model_name = "BAAI/bge-m3")
-# instructor_embeddings = HuggingFaceEmbeddings()
-
 vectordb = FAISS.from_documents(documents = data, embedding = instructor_embeddings)
 
-# e = embeddings_model.embed_query("What is your refund policy")
-
 retriever = vectordb.as_retriever()
 
 from langchain.prompts import PromptTemplate
@@ -49,104 +46,161 @@ chain = RetrievalQA.from_chain_type(llm = llm,
                     return_source_documents=True,
                     chain_type_kwargs = {"prompt": PROMPT})
 
-# Load your LLM model and necessary components
-# Assume `chain` is a function defined in your notebook that takes a query and returns the output as shown
-# For this example, we'll assume the model and chain function are already available
-
-def chatbot(query):
-    response = chain(query)
-    # Extract the 'result' part of the response
-    result = response.get('result', 'Sorry, I could not find an answer.')
-    return result
-
-# Define the Gradio interface
-iface = gr.Interface(
-    fn=chatbot,  # Function to call
-    inputs=gr.inputs.Textbox(lines=2, placeholder="Enter your question here..."),  # Input type
-    outputs="text",  # Output type
-    title="Hugging Face LLM Chatbot",
-    description="Ask any question related to the documents and get an answer from the LLM model.",
-)
+def chatresponse(message, history):
+    output = chain(message)
+    return output['result']
 
-# Launch the interface
-iface.launch()
+gr.ChatInterface(chatresponse).launch()
 
-# Save this file as app.py and push it to your Hugging Face Space repository
 
 # import gradio as gr
+# from langchain.llms import GooglePalm
 
-# def greet(name, intensity):
-#     return "Hello, " + name + "!" * int(intensity)
+# api_key = "AIzaSyCdM_aAIsW_nPbjarOF83mbX1_z1cVX2_M"
 
-# demo = gr.Interface(
-#     fn=greet,
-#     inputs=["text", "slider"],
-#     outputs=["text"],
-# )
+# llm = GooglePalm(google_api_key = api_key, temperature=0.7)
 
-# demo.launch()
+# from langchain.document_loaders.csv_loader import CSVLoader
 
+# loader = CSVLoader(file_path='aiotsmartlabs_faq.csv', source_column = 'prompt')
+# data = loader.load()
 
-# import gradio as gr
-# from huggingface_hub import InferenceClient
-
-# """
-# For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
-# """
-# client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
-
-
-# def respond(
-#     message,
-#     history: list[tuple[str, str]],
-#     system_message,
-#     max_tokens,
-#     temperature,
-#     top_p,
-# ):
-#     messages = [{"role": "system", "content": system_message}]
-
-#     for val in history:
-#         if val[0]:
-#             messages.append({"role": "user", "content": val[0]})
-#         if val[1]:
-#             messages.append({"role": "assistant", "content": val[1]})
-
-#     messages.append({"role": "user", "content": message})
-
-#     response = ""
-
-#     for message in client.chat_completion(
-#         messages,
-#         max_tokens=max_tokens,
-#         stream=True,
-#         temperature=temperature,
-#         top_p=top_p,
-#     ):
-#         token = message.choices[0].delta.content
-
-#         response += token
-#         yield response
-
-# """
-# For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
-# """
-# demo = gr.ChatInterface(
-#     respond,
-#     additional_inputs=[
-#         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
-#         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-#         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
-#         gr.Slider(
-#             minimum=0.1,
-#             maximum=1.0,
-#             value=0.95,
-#             step=0.05,
-#             label="Top-p (nucleus sampling)",
-#         ),
-#     ],
+# from langchain_huggingface import HuggingFaceEmbeddings
+# from langchain.vectorstores import FAISS
+
+# # instructor_embeddings = HuggingFaceEmbeddings(model_name = "Alibaba-NLP/gte-Qwen2-7B-instruct") # best model <-- but too big
+# instructor_embeddings = HuggingFaceEmbeddings(model_name = "BAAI/bge-m3")
+# # instructor_embeddings = HuggingFaceEmbeddings()
+
+# vectordb = FAISS.from_documents(documents = data, embedding = instructor_embeddings)
+
+# # e = embeddings_model.embed_query("What is your refund policy")
+
+# retriever = vectordb.as_retriever()
+
+# from langchain.prompts import PromptTemplate
+
+# prompt_template = """Given the following context and a question, generate an answer based on the context only.
+
+# In the answer try to provide as much text as possible from "response" section in the source document context without making much changes.
+# If somebody asks "Who are you?" or a similar phrase, state "I am Rishi's assistant built using a Large Language Model!"
+# If the answer is not found in the context, kindly state "I don't know. Please ask Rishi on Discord. Discord Invite Link: https://discord.gg/6ezpZGeCcM. Or email at rishi@aiotsmartlabs.com" Don't try to make up an answer.
+
+# CONTEXT: {context}
+
+# QUESTION: {question}"""
+
+# PROMPT = PromptTemplate(
+#     template = prompt_template, input_variables = ["context", "question"]
 # )
 
+# from langchain.chains import RetrievalQA
+
+# chain = RetrievalQA.from_chain_type(llm = llm,
+#                     chain_type="stuff",
+#                     retriever=retriever,
+#                     input_key="query",
+#                     return_source_documents=True,
+#                     chain_type_kwargs = {"prompt": PROMPT})
+
+# # Load your LLM model and necessary components
+# # Assume `chain` is a function defined in your notebook that takes a query and returns the output as shown
+# # For this example, we'll assume the model and chain function are already available
+
+# def chatbot(query):
+#     response = chain(query)
+#     # Extract the 'result' part of the response
+#     result = response.get('result', 'Sorry, I could not find an answer.')
+#     return result
+
+# # Define the Gradio interface
+# iface = gr.Interface(
+#     fn=chatbot,  # Function to call
+#     inputs=gr.inputs.Textbox(lines=2, placeholder="Enter your question here..."),  # Input type
+#     outputs="text",  # Output type
+#     title="Hugging Face LLM Chatbot",
+#     description="Ask any question related to the documents and get an answer from the LLM model.",
+# )
 
-# if __name__ == "__main__":
-#     demo.launch()
+# # Launch the interface
+# # iface.launch()
+
+# # Save this file as app.py and push it to your Hugging Face Space repository
+
+# # import gradio as gr
+
+# # def greet(name, intensity):
+# #     return "Hello, " + name + "!" * int(intensity)
+
+# # demo = gr.Interface(
+# #     fn=greet,
+# #     inputs=["text", "slider"],
+# #     outputs=["text"],
+# # )
+
+# # demo.launch()
+
+
+# # import gradio as gr
+# # from huggingface_hub import InferenceClient
+
+# # """
+# # For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
+# # """
+# # client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
+
+
+# # def respond(
+# #     message,
+# #     history: list[tuple[str, str]],
+# #     system_message,
+# #     max_tokens,
+# #     temperature,
+# #     top_p,
+# # ):
+# #     messages = [{"role": "system", "content": system_message}]
+
+# #     for val in history:
+# #         if val[0]:
+# #             messages.append({"role": "user", "content": val[0]})
+# #         if val[1]:
+# #             messages.append({"role": "assistant", "content": val[1]})
+
+# #     messages.append({"role": "user", "content": message})
+
+# #     response = ""
+
+# #     for message in client.chat_completion(
+# #         messages,
+# #         max_tokens=max_tokens,
+# #         stream=True,
+# #         temperature=temperature,
+# #         top_p=top_p,
+# #     ):
+# #         token = message.choices[0].delta.content
+
+# #         response += token
+# #         yield response
+
+# # """
+# # For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
+# # """
+# # demo = gr.ChatInterface(
+# #     respond,
+# #     additional_inputs=[
+# #         gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
+# #         gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
+# #         gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
+# #         gr.Slider(
+# #             minimum=0.1,
+# #             maximum=1.0,
+# #             value=0.95,
+# #             step=0.05,
+# #             label="Top-p (nucleus sampling)",
+# #         ),
+# #     ],
+# # )
+
+
+# # if __name__ == "__main__":
+# #     demo.launch()
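Taken together, the commit leaves app.py as a small retrieval-augmented chatbot: load the FAQ CSV, embed it with BAAI/bge-m3, index the vectors in FAISS, answer through a prompt-constrained RetrievalQA chain, and serve it with gr.ChatInterface. The sketch below condenses the committed code into one runnable file; it is a sketch under two assumptions, not the committed file itself: the prompt template is abbreviated (the full wording is in the diff above), and the Google API key is read from a GOOGLE_API_KEY environment variable instead of the hard-coded literal in the commit.

# Condensed sketch of app.py as this commit leaves it (same pre-0.1 LangChain APIs).
# Assumptions: GOOGLE_API_KEY comes from the environment (the commit hard-codes it),
# and the prompt template is abbreviated; the full wording is in the diff above.
import os

import gradio as gr
from langchain.llms import GooglePalm
from langchain.document_loaders.csv_loader import CSVLoader
from langchain_huggingface import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.prompts import PromptTemplate
from langchain.chains import RetrievalQA

llm = GooglePalm(google_api_key=os.environ["GOOGLE_API_KEY"], temperature=0.7)

# Each CSV row becomes a document; its 'prompt' column is recorded as the source.
data = CSVLoader(file_path="aiotsmartlabs_faq.csv", source_column="prompt").load()

# Embed the FAQ with BAAI/bge-m3 and index the vectors in FAISS.
vectordb = FAISS.from_documents(
    documents=data,
    embedding=HuggingFaceEmbeddings(model_name="BAAI/bge-m3"),
)

# Abbreviated stand-in for the committed prompt text.
PROMPT = PromptTemplate(
    template="Answer from the context only.\n\nCONTEXT: {context}\n\nQUESTION: {question}",
    input_variables=["context", "question"],
)

chain = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=vectordb.as_retriever(),
    input_key="query",
    return_source_documents=True,
    chain_type_kwargs={"prompt": PROMPT},
)

def chatresponse(message, history):
    # gr.ChatInterface calls the handler with (message, history); the legacy
    # RetrievalQA __call__ returns a dict with the answer under 'result'.
    return chain(message)["result"]

gr.ChatInterface(chatresponse).launch()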