Files changed (1) hide show
  1. app.py +182 -24
app.py CHANGED
@@ -1,5 +1,8 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
 
 
 
3
  import os
4
  import groq
5
  import warnings
@@ -7,12 +10,21 @@ import asyncio
7
  from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings
8
  from llama_index.llms.groq import Groq
9
  from llama_index.embeddings.huggingface import HuggingFaceEmbedding
10
-
11
- # A warning may appear which doesn't
12
- # affect the operation of the code
13
- # Suppress it with this code
14
  warnings.filterwarnings("ignore", message=".*clean_up_tokenization_spaces.*")
15
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
  # Global variables
17
  index = None
18
  query_engine = None
@@ -24,10 +36,6 @@ Settings.llm = llm # Ensure Groq is the LLM being used
24
  # Initialize our chosen embedding model
25
  embed_model = HuggingFaceEmbedding(model_name="sentence-transformers/all-MiniLM-L6-v2")
26
 
27
- # These are our RAG functions, called in response to user
28
- # initiated events e.g clicking the Load Documents button
29
- # on the GUI
30
- #
31
  def load_documents(file_objs):
32
  global index, query_engine
33
  try:
@@ -75,26 +83,176 @@ def clear_all():
75
  return None, "", [], "" # Reset file input, load output, chatbot, and message input to default states
76
 
77
 
78
- # Create the Gradio interface
79
- with gr.Blocks(theme=gr.themes.Soft()) as demo:
80
- gr.Markdown("# RAG Multi-file Chat Application")
81
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
82
  with gr.Row():
83
- file_input = gr.File(label="Select files to load", file_count="multiple")
84
- load_btn = gr.Button("Load Documents")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
85
 
86
- load_output = gr.Textbox(label="Load Status")
 
 
 
87
 
88
- msg = gr.Textbox(label="Enter your question")
89
- chatbot = gr.Chatbot()
90
- clear = gr.Button("Clear")
91
 
92
- # Set up event handlers
93
- load_btn.click(load_documents, inputs=[file_input], outputs=[load_output])
94
- msg.submit(perform_rag, inputs=[msg, chatbot], outputs=[chatbot])
95
- clear.click(clear_all, outputs=[file_input, load_output, chatbot, msg], queue=False)
 
 
96
 
97
- # Run the app
 
98
  if __name__ == "__main__":
99
- demo.queue()
100
- demo.launch()
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
+ import numpy as np
4
+ import pandas as pd
5
+ import matplotlib.pyplot as plt
6
  import os
7
  import groq
8
  import warnings
 
10
  from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings
11
  from llama_index.llms.groq import Groq
12
  from llama_index.embeddings.huggingface import HuggingFaceEmbedding
 
 
 
 
13
  warnings.filterwarnings("ignore", message=".*clean_up_tokenization_spaces.*")
14
 
15
# Custom Soft theme for the app: Standard Chartered-style palette
# (blue primary, green secondary, neutral grays) with the Korean-friendly
# Noto Sans KR Google font for both proportional and mono text.
# NOTE(review): color values look auto-generated by the Gradio theme builder;
# the rgba(...) entries with fractional components are presumably exported
# verbatim from it — confirm before hand-editing.
theme = gr.themes.Soft(
    primary_hue=gr.themes.Color(c100="#cde3fb", c200="#cde3fb", c300="#81b9f4", c400="#4f9df0", c50="rgba(221.44736842105263, 237.49427917620136, 255, 1)", c500="#0473ea", c600="#0473ea", c700="#0473ea", c800="#0473ea", c900="#0083B3", c950="#0083b3"),
    secondary_hue=gr.themes.Color(c100="#d7f6cc", c200="#d7f6cc", c300="#9be880", c400="#9be880", c50="#d7f6cc", c500="#74df4c", c600="#38d200", c700="#38d200", c800="rgba(50.43333333333328, 189.125, 0, 1)", c900="rgba(41.409166666666614, 155.28437499999998, 0, 1)", c950="#134e28"),
    neutral_hue=gr.themes.Color(c100="#e5e5e5", c200="#d4d4d4", c300="#a8a9aa", c400="#a8a9aa", c50="#f9fafb", c500="rgb(134, 135, 136)", c600="rgb(134, 135, 136)", c700="#525355", c800="#525355", c900="rgba(52.90131578947368, 53.54254385964912, 54.82499999999999, 1)", c950="#353637"),
    font=[gr.themes.GoogleFont('Noto Sans KR'), gr.themes.GoogleFont('Noto Sans KR'), gr.themes.GoogleFont('Noto Sans KR'), gr.themes.GoogleFont('Noto Sans KR')],
    font_mono=[gr.themes.GoogleFont('Noto Sans KR'), gr.themes.GoogleFont('Noto Sans KR'), gr.themes.GoogleFont('Noto Sans KR'), gr.themes.GoogleFont('Noto Sans KR')],
)
22
+
23
+
24
+
25
"""
RAG Custom Functions
"""
# Global state shared by the RAG event handlers: the vector index built from
# the user-uploaded documents, and the query engine derived from that index.
# Both start as None until the user loads documents.
index = None
query_engine = None
 
36
# Initialize our chosen embedding model.
# all-MiniLM-L6-v2 is a small, fast sentence-transformer used to embed the
# uploaded documents for the vector index.
embed_model = HuggingFaceEmbedding(model_name="sentence-transformers/all-MiniLM-L6-v2")
38
 
 
 
 
 
39
  def load_documents(file_objs):
40
  global index, query_engine
41
  try:
 
83
  return None, "", [], "" # Reset file input, load output, chatbot, and message input to default states
84
 
85
 
 
 
 
86
 
87
"""
For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
"""
# Hosted Inference API client used by `respond` to stream chat completions
# from the zephyr-7b-beta model.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
91
+
92
+ # gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
93
+ # gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
94
+ # gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
95
+ # gr.Slider(
96
+ # minimum=0.1,
97
+ # maximum=1.0,
98
+ # value=0.95,
99
+ # step=0.05,
100
+ # label="Top-p (nucleus sampling)",
101
+
102
def respond(
    message,
    history: list[tuple[str, str]],
    system_message="당신은 μŠ€νƒ λ‹€λ“œμ°¨νƒ€λ“œμ€ν–‰ ν•œκ΅­ μ§€μ‚¬μ˜ μΉœμ ˆν•˜κ³  유λŠ₯ν•œ AI μ–΄μ‹œμŠ€ν„΄νŠΈμž…λ‹ˆλ‹€. ν•œκ΅­μ–΄λ‘œ μ›ν™œν•˜κ²Œ μ†Œν†΅ν•˜λ©°, λ²ˆμ—­, λ¬Έμ„œ μž‘μ„±, 그리고 λ‹€μ–‘ν•œ 업무 지원을 μ‹ μ†ν•˜κ³  μ •ν™•ν•˜κ²Œ μ²˜λ¦¬ν•©λ‹ˆλ‹€. ν•„μš”ν•œ 정보λ₯Ό μ œκ³΅ν•˜κ³ , 직원듀이 업무λ₯Ό 효율적으둜 μˆ˜ν–‰ν•  수 μžˆλ„λ‘ λ•λŠ” 역할을 μˆ˜ν–‰ν•©λ‹ˆλ‹€.",
    max_tokens=512,
    temperature=0.2,
    top_p=0.95,
):
    """Stream a chat completion from the Inference API for the chat UI.

    Args:
        message: The latest user message.
        history: Prior (user, assistant) turn pairs from the Gradio chatbot.
        system_message: System prompt; defaults to the SCBK Korean-assistant
            persona.
        max_tokens: Maximum number of new tokens to generate.
        temperature: Sampling temperature.
        top_p: Nucleus-sampling threshold.

    Yields:
        The accumulated assistant response text after each streamed chunk.
    """
    messages = [{"role": "system", "content": system_message}]

    # Replay prior turns; skip empty slots so an in-progress turn does not
    # produce an empty message entry.
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})

    messages.append({"role": "user", "content": message})

    response = ""

    # Fixes vs. the original: the loop variable was named `message`, shadowing
    # the parameter, and `delta.content` was concatenated unguarded — streamed
    # chunks can carry content=None (e.g. the final chunk), which raised
    # TypeError on `response += token`.
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token:
            response += token
        yield response
133
+
134
+
135
+ # """
136
+ # For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
137
+ # """
138
+
139
+ # Define the functions for each tab
140
def scbk_gpt_response(user_message):
    """Placeholder SCBK-GPT handler: echo the user message with a prefix."""
    return "SCBK-GPT: {}".format(user_message)
144
+
145
def data_analysis(data):
    """Placeholder analysis: return pandas summary statistics for *data*."""
    frame = pd.DataFrame(data)
    return frame.describe()
150
+
151
def rag_response(query, document):
    """Placeholder RAG handler: echo the query and the source file name.

    A real implementation would retrieve an answer for *query* from the
    indexed *document* (any object exposing a ``name`` attribute).
    """
    return "RAG: {} from {}".format(query, document.name)
155
+
156
def agentic_ai_response(task):
    """Placeholder Agentic AI handler: echo the submitted task."""
    return "Agentic AI: {}".format(task)
159
+
160
# Page-level CSS: make the app fill the viewport and let the chatbot panel
# (elem_id="custom_chatbot" in the SCBK-GPT tab) grow and scroll vertically.
custom_css = """
.contain {
    display: flex;
    flex-direction: column;
    height: 100vh;
}
#custom_chatbot {
    flex-grow: 1;
    display: flex;
    flex-direction: column;
}
#custom_chatbot .gr-chatbot {
    flex-grow: 1;
    overflow-y: auto;
}
"""
176
+
177
# Create the Gradio app with the custom Soft theme defined above.
with gr.Blocks(theme=theme, fill_height=True, css=custom_css) as demo:
    # Banner row: gradient header with the SCBK logo and app title.
    with gr.Row():
        #gr.Image("/file=logo.png", elem_classes="app-logo", show_download_button=False, width=200) # Logo on the left
        # gr.Markdown("<img src='/file=/logo.png' alt='SCBK' width='200'>")
        # gr.Markdown("# SCBK-GPT Demo by AI Usage 1μ‘°", elem_classes="app-title") # Title on the right
        # NOTE(review): the <div> style attribute below ends with a doubled
        # quote (sans-serif;"") — browsers tolerate it, but confirm it is
        # intentional before editing the HTML.
        gr.Markdown(
            """
            <link href="https://fonts.googleapis.com/css2?family=Noto+Sans+KR:wght@400;700&display=swap" rel="stylesheet">
            <div style="display: flex; align-items: center; justify-content: center; background: linear-gradient(to right, #2B3A89, #285FBF); padding: 20px; font-family: 'Noto Sans KR', sans-serif;"">
            <img src="https://i.namu.wiki/i/UUHBBtfx06qUnW4B7oVOiBAAQJ1C3ynKfEI3YjIlvhnuOtkQHejb4_ziBhD7p4n_O9G9LwFz-bRokNibnKaZ9Y8GQsH13OmZTBGuTXBrcS-YkN8ra67jiCaAFcDQDspjbwQk8duLiQ1cX0jg-WsQBA.svg" alt="SCBK" width="200" style="margin-right: 10px;">
            <h1 style="margin: 0; color: white; font-size: 2em;">
            SCBK-GPT<br>
            <span style="font-size: 0.6em; color: white; vertical-align: middle;">by AI Usage 1μ‘°</span>
            </h1>
            </div>
            """
        )

    # Tab 1: SCBK-GPT — general-purpose chat, streamed via `respond`.
    with gr.Tab("SCBK-GPT"):
        custom_chatbot = gr.Chatbot(elem_id="custom_chatbot")
        gr.ChatInterface(
            respond,
            additional_inputs=[],
            chatbot=custom_chatbot,
            multimodal=False,
            examples=["μ•ˆλ…•ν•˜μ„Έμš”","Asset이 뭔지 μ„€λͺ…ν•΄μ€˜"],
        )

    # Tab 2: Data Analysis — upload a CSV and ask a question about it.
    with gr.Tab("Data Analysis"):
        data_input = gr.File(label="Upload CSV File", file_types=[".csv"])
        query_input = gr.Textbox(label="Ask a Question")
        analysis_output = gr.Textbox(label="Answer")
        analyze_button = gr.Button("Analyze")

        def analyze_data(file, query):
            """Read the uploaded CSV and return a placeholder answer string."""
            df = pd.read_csv(file.name)
            # Placeholder for actual data analysis logic
            answer = f"Answer to '{query}': {df.head().to_string()}"
            return answer

        analyze_button.click(analyze_data, inputs=[data_input, query_input], outputs=analysis_output)

    # Tab 3: RAG — multi-file document chat backed by the llama_index
    # pipeline (load_documents / perform_rag / clear_all defined earlier
    # in this file).
    with gr.Tab("RAG"):
        # document_input = gr.File(label="Upload Document", file_types=[".pdf", ".txt"])
        # query_input = gr.Textbox(label="Query")
        # rag_output = gr.Textbox(label="Response")
        # query_button = gr.Button("Query")
        # query_button.click(rag_response, inputs=[query_input, document_input], outputs=rag_output)
        gr.Markdown("# RAG Multi-file Chat Application")

        with gr.Row():
            file_input = gr.File(label="Select files to load", file_count="multiple", file_types=[".pdf", ".txt"])
            load_btn = gr.Button("λ¬Έμ„œ μ—…λ‘œλ“œ")

        load_output = gr.Textbox(label="λ‘œλ”© Status")

        msg = gr.Textbox(label="μ§ˆλ¬Έμ„ μž…λ ₯ν•΄ μ£Όμ„Έμš”")
        chatbot = gr.Chatbot()
        clear = gr.Button("Clear")

        # Event handlers: build the index, answer a question, and reset all
        # RAG widgets (file input, status box, chat history, message box).
        load_btn.click(load_documents, inputs=[file_input], outputs=[load_output])
        msg.submit(perform_rag, inputs=[msg, chatbot], outputs=[chatbot])
        clear.click(clear_all, outputs=[file_input, load_output, chatbot, msg], queue=False)

    # Tab 4: Agentic AI — placeholder task-submission demo.
    with gr.Tab("Agentic AI"):
        task_input = gr.Textbox(label="Task")
        agentic_output = gr.Textbox(label="Response")
        task_button = gr.Button("Submit Task")
        task_button.click(agentic_ai_response, inputs=task_input, outputs=agentic_output)
254
 
255
+
256
# Launch the app
if __name__ == "__main__":
    # show_error surfaces Python tracebacks in the browser UI for debugging.
    demo.launch(show_error=True)