NCTCMumbai commited on
Commit
bc50791
·
verified ·
1 Parent(s): 120cf5e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +447 -474
app.py CHANGED
@@ -1,134 +1,95 @@
1
-
 
2
  from ragatouille import RAGPretrainedModel
3
- import subprocess
4
- import json
5
- import spaces
6
- import firebase_admin
7
- from firebase_admin import credentials, firestore
8
  import logging
9
  from pathlib import Path
10
  from time import perf_counter
11
- from datetime import datetime
12
- import gradio as gr
13
- from jinja2 import Environment, FileSystemLoader
14
- import numpy as np
15
  from sentence_transformers import CrossEncoder
16
  from huggingface_hub import InferenceClient
 
 
17
  from os import getenv
18
 
19
- from backend.query_llm import generate_hf, generate_openai
20
- from backend.semantic_search import table, retriever
21
- from huggingface_hub import InferenceClient
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
22
 
23
 
 
24
  VECTOR_COLUMN_NAME = "vector"
25
  TEXT_COLUMN_NAME = "text"
26
  HF_TOKEN = getenv("HUGGING_FACE_HUB_TOKEN")
27
  proj_dir = Path(__file__).parent
28
- # Setting up the logging
29
  logging.basicConfig(level=logging.INFO)
30
  logger = logging.getLogger(__name__)
31
- client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1",token=HF_TOKEN)
32
- # Set up the template environment with the templates directory
33
  env = Environment(loader=FileSystemLoader(proj_dir / 'templates'))
34
 
35
- # Load the templates directly from the environment
36
  template = env.get_template('template.j2')
37
  template_html = env.get_template('template_html.j2')
38
- #___________________
39
- # service_account_key='firebase.json'
40
- # # Create a Certificate object from the service account info
41
- # cred = credentials.Certificate(service_account_key)
42
- # # Initialize the Firebase Admin
43
- # firebase_admin.initialize_app(cred)
44
-
45
- # # # Create a reference to the Firestore database
46
- # db = firestore.client()
47
- # #db usage
48
- # collection_name = 'Nirvachana' # Replace with your collection name
49
- # field_name = 'message_count' # Replace with your field name for count
50
- # Examples
51
- examples = ['My transhipment cargo is missing','can u explain and tabulate difference between b 17 bond and a warehousing bond',
52
- 'What are benefits of the AEO Scheme and eligibility criteria?',
53
- 'What are penalties for customs offences? ', 'what are penalties to customs officers misusing their powers under customs act?','What are eligibility criteria for exemption from cost recovery charges','list in detail what is procedure for obtaining new approval for openeing a CFS attached to an ICD']
54
-
55
-
56
-
57
- # def get_and_increment_value_count(db , collection_name, field_name):
58
- # """
59
- # Retrieves a value count from the specified Firestore collection and field,
60
- # increments it by 1, and updates the field with the new value."""
61
- # collection_ref = db.collection(collection_name)
62
- # doc_ref = collection_ref.document('count_doc') # Assuming a dedicated document for count
63
-
64
- # # Use a transaction to ensure consistency across reads and writes
65
- # try:
66
- # with db.transaction() as transaction:
67
- # # Get the current value count (or initialize to 0 if it doesn't exist)
68
- # current_count_doc = doc_ref.get()
69
- # current_count_data = current_count_doc.to_dict()
70
- # if current_count_data:
71
- # current_count = current_count_data.get(field_name, 0)
72
- # else:
73
- # current_count = 0
74
- # # Increment the count
75
- # new_count = current_count + 1
76
- # # Update the document with the new count
77
- # transaction.set(doc_ref, {field_name: new_count})
78
- # return new_count
79
- # except Exception as e:
80
- # print(f"Error retrieving and updating value count: {e}")
81
- # return None # Indicate error
82
-
83
- # def update_count_html():
84
- # usage_count = get_and_increment_value_count(db ,collection_name, field_name)
85
- # ccount_html = gr.HTML(value=f"""
86
- # <div style="display: flex; justify-content: flex-end;">
87
- # <span style="font-weight: bold; color: maroon; font-size: 18px;">No of Usages:</span>
88
- # <span style="font-weight: bold; color: maroon; font-size: 18px;">{usage_count}</span>
89
- # </div>
90
- # """)
91
- # return count_html
92
-
93
- # def store_message(db,query,answer,cross_encoder):
94
- # timestamp = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
95
- # # Create a new document reference with a dynamic document name based on timestamp
96
- # new_completion= db.collection('Nirvachana').document(f"chatlogs_{timestamp}")
97
- # new_completion.set({
98
- # 'query': query,
99
- # 'answer':answer,
100
- # 'created_time': firestore.SERVER_TIMESTAMP,
101
- # 'embedding': cross_encoder,
102
- # 'title': 'Expenditure observer bot'
103
- # })
104
-
105
 
106
  def add_text(history, text):
107
  history = [] if history is None else history
108
  history = history + [(text, None)]
109
  return history, gr.Textbox(value="", interactive=False)
110
 
111
-
112
  def bot(history, cross_encoder):
113
  top_rerank = 25
114
  top_k_rank = 20
115
  query = history[-1][0]
116
 
117
  if not query:
118
- gr.Warning("Please submit a non-empty string as a prompt")
119
- raise ValueError("Empty string was submitted")
120
 
121
  logger.warning('Retrieving documents...')
122
 
123
- # if COLBERT RAGATATOUILLE PROCEDURE :
124
- if cross_encoder=='(HIGH ACCURATE) ColBERT':
125
  gr.Warning('Retrieving using ColBERT.. First time query will take a minute for model to load..pls wait')
126
- RAG= RAGPretrainedModel.from_pretrained("colbert-ir/colbertv2.0")
127
- RAG_db=RAG.from_index('.ragatouille/colbert/indexes/cbseclass10index')
128
- documents_full=RAG_db.search(query,k=top_k_rank)
129
 
130
- documents=[item['content'] for item in documents_full]
131
- # Create Prompt
132
  prompt = template.render(documents=documents, query=query)
133
  prompt_html = template_html.render(documents=documents, query=query)
134
 
@@ -138,40 +99,28 @@ def bot(history, cross_encoder):
138
  for character in generate_fn(prompt, history[:-1]):
139
  history[-1][1] = character
140
  yield history, prompt_html
141
- print('Final history is ',history)
142
- #store_message(db,history[-1][0],history[-1][1],cross_encoder)
143
  else:
144
- # Retrieve documents relevant to query
145
  document_start = perf_counter()
146
 
147
  query_vec = retriever.encode(query)
148
- logger.warning(f'Finished query vec')
149
  doc1 = table.search(query_vec, vector_column_name=VECTOR_COLUMN_NAME).limit(top_k_rank)
150
 
151
-
152
-
153
- logger.warning(f'Finished search')
154
  documents = table.search(query_vec, vector_column_name=VECTOR_COLUMN_NAME).limit(top_rerank).to_list()
155
  documents = [doc[TEXT_COLUMN_NAME] for doc in documents]
156
- logger.warning(f'start cross encoder {len(documents)}')
157
- # Retrieve documents relevant to query
158
  query_doc_pair = [[query, doc] for doc in documents]
159
- if cross_encoder=='(FAST) MiniLM-L6v2' :
160
- cross_encoder1 = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2')
161
- elif cross_encoder=='(ACCURATE) BGE reranker':
162
- cross_encoder1 = CrossEncoder('BAAI/bge-reranker-base')
163
 
164
  cross_scores = cross_encoder1.predict(query_doc_pair)
165
  sim_scores_argsort = list(reversed(np.argsort(cross_scores)))
166
- logger.warning(f'Finished cross encoder {len(documents)}')
167
 
168
  documents = [documents[idx] for idx in sim_scores_argsort[:top_k_rank]]
169
- logger.warning(f'num documents {len(documents)}')
170
 
171
  document_time = perf_counter() - document_start
172
- logger.warning(f'Finished Retrieving documents in {round(document_time, 2)} seconds...')
173
 
174
- # Create Prompt
175
  prompt = template.render(documents=documents, query=query)
176
  prompt_html = template_html.render(documents=documents, query=query)
177
 
@@ -179,412 +128,436 @@ def bot(history, cross_encoder):
179
 
180
  history[-1][1] = ""
181
  for character in generate_fn(prompt, history[:-1]):
182
- history[-1][1] = character
183
  yield history, prompt_html
184
- print('Final history is ',history)
185
- #store_message(db,history[-1][0],history[-1][1],cross_encoder)
186
-
187
- def system_instructions(question_difficulty, topic,documents_str):
188
- return f"""<s> [INST] Your are a great teacher and your task is to create 10 questions with 4 choices with a {question_difficulty} difficulty about topic request " {topic} " only from the below given documents, {documents_str} then create an answers. Index in JSON format, the questions as "Q#":"" to "Q#":"", the four choices as "Q#:C1":"" to "Q#:C4":"", and the answers as "A#":"Q#:C#" to "A#":"Q#:C#". [/INST]"""
189
 
190
- RAG_db = gr.State()
191
-
192
- def load_model():
193
- try:
194
- # Initialize the model
195
- RAG = RAGPretrainedModel.from_pretrained("colbert-ir/colbertv2.0")
196
- # Load the RAG database
197
- RAG_db.value = RAG.from_index('.ragatouille/colbert/indexes/cbseclass10index')
198
- return 'Ready to Go!!'
199
- except Exception as e:
200
- return f"Error loading model: {e}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
201
 
 
 
 
202
 
203
- def generate_quiz(question_difficulty, topic):
204
- if not topic.strip():
205
- return ['Please enter a valid topic.'] + [gr.Radio(visible=False) for _ in range(10)]
206
-
207
- top_k_rank = 10
208
- # Load the model and database within the generate_quiz function
209
- try:
210
- RAG = RAGPretrainedModel.from_pretrained("colbert-ir/colbertv2.0")
211
- RAG_db_ = RAG.from_index('.ragatouille/colbert/indexes/cbseclass10index')
212
- gr.Warning('Model loaded!')
213
- except Exception as e:
214
- return [f"Error loading model: {e}"] + [gr.Radio(visible=False) for _ in range(10)]
215
-
216
- RAG_db_ = RAG_db.value
217
- documents_full = RAG_db_.search(topic, k=top_k_rank)
218
-
219
- generate_kwargs = dict(
220
- temperature=0.2,
221
- max_new_tokens=4000,
222
- top_p=0.95,
223
- repetition_penalty=1.0,
224
- do_sample=True,
225
- seed=42,
226
- )
227
-
228
- question_radio_list = []
229
- count = 0
230
- while count <= 3:
231
- try:
232
- documents = [item['content'] for item in documents_full]
233
- document_summaries = [f"[DOCUMENT {i+1}]: {summary}{count}" for i, summary in enumerate(documents)]
234
- documents_str = '\n'.join(document_summaries)
235
- formatted_prompt = system_instructions(question_difficulty, topic, documents_str)
236
-
237
- pre_prompt = [
238
- {"role": "system", "content": formatted_prompt}
239
- ]
240
- response = client.text_generation(
241
- formatted_prompt, **generate_kwargs, stream=False, details=False, return_full_text=False,
242
- )
243
- output_json = json.loads(f"{response}")
244
-
245
- global quiz_data
246
- quiz_data = output_json
247
-
248
- for question_num in range(1, 11):
249
- question_key = f"Q{question_num}"
250
- answer_key = f"A{question_num}"
251
- question = quiz_data.get(question_key)
252
- answer = quiz_data.get(quiz_data.get(answer_key))
253
-
254
- if not question or not answer:
255
- continue
256
-
257
- choice_keys = [f"{question_key}:C{i}" for i in range(1, 5)]
258
- choice_list = [quiz_data.get(choice_key, "Choice not found") for choice_key in choice_keys]
259
-
260
- radio = gr.Radio(choices=choice_list, label=question, visible=True, interactive=True)
261
- question_radio_list.append(radio)
262
-
263
- if len(question_radio_list) == 10:
264
- break
265
- else:
266
- count += 1
267
- continue
268
- except Exception as e:
269
- count += 1
270
- if count == 3:
271
- return ['Sorry. Pls try with another topic!'] + [gr.Radio(visible=False) for _ in range(10)]
272
- continue
273
-
274
- return ['Quiz Generated!'] + question_radio_list
275
-
276
- def compare_answers(*user_answers):
277
- user_answer_list = user_answers
278
- answers_list = [quiz_data.get(quiz_data.get(f"A{question_num}")) for question_num in range(1, 11)]
279
-
280
- score = sum(1 for answer in user_answer_list if answer in answers_list)
281
-
282
- if score > 7:
283
- message = f"### Excellent! You got {score} out of 10!"
284
- elif score > 5:
285
- message = f"### Good! You got {score} out of 10!"
286
- else:
287
- message = f"### You got {score} out of 10! Don’t worry, you can prepare well and try better next time!"
288
-
289
- return message
290
-
291
- #with gr.Blocks(theme='Insuz/SimpleIndigo') as demo:
292
  with gr.Blocks(theme='NoCrypt/miku') as CHATBOT:
293
  with gr.Row():
294
  with gr.Column(scale=10):
295
- # gr.Markdown(
296
- # """
297
- # # Theme preview: `paris`
298
- # To use this theme, set `theme='earneleh/paris'` in `gr.Blocks()` or `gr.Interface()`.
299
- # You can append an `@` and a semantic version expression, e.g. @>=1.0.0,<2.0.0 to pin to a given version
300
- # of this theme.
301
- # """
302
- # )
303
- gr.HTML(value="""<div style="color: #FF4500;"><h1>ADWITIYA-</h1> <h1><span style="color: #008000">Custom Manual Chatbot and Quizbot</span></h1>
304
- </div>""", elem_id='heading')
305
-
306
- gr.HTML(value=f"""
307
- <p style="font-family: sans-serif; font-size: 16px;">
308
- Using GenAI for CBIC Capacity Building - A free chat bot developed by National Customs Targeting Center using Open source LLMs for CBIC Officers
309
- </p>
310
- """, elem_id='Sub-heading')
311
- #usage_count = get_and_increment_value_count(db,collection_name, field_name)
312
- gr.HTML(value=f"""<p style="font-family: Arial, sans-serif; font-size: 14px;">Developed by NCTC,Mumbai . Suggestions may be sent to <a href="mailto:nctc-admin@gov.in" style="color: #00008B; font-style: italic;">ramyadevi1607@yahoo.com</a>.</p>""", elem_id='Sub-heading1 ')
313
 
314
  with gr.Column(scale=3):
315
- gr.Image(value='logo.png',height=200,width=200)
316
-
317
-
318
- # gr.HTML(value="""<div style="color: #FF4500;"><h1>CHEERFULL CBSE-</h1> <h1><span style="color: #008000">AI Assisted Fun Learning</span></h1>
319
- # <img src='logo.png' alt="Chatbot" width="50" height="50" />
320
- # </div>""", elem_id='heading')
321
-
322
- # gr.HTML(value=f"""
323
- # <p style="font-family: sans-serif; font-size: 16px;">
324
- # A free Artificial Intelligence Chatbot assistant trained on CBSE Class 10 Science Notes to engage and help students and teachers of Puducherry.
325
- # </p>
326
- # """, elem_id='Sub-heading')
327
- # #usage_count = get_and_increment_value_count(db,collection_name, field_name)
328
- # gr.HTML(value=f"""<p style="font-family: Arial, sans-serif; font-size: 16px;">Developed by K M Ramyasri , PGT . Suggestions may be sent to <a href="mailto:ramyadevi1607@yahoo.com" style="color: #00008B; font-style: italic;">ramyadevi1607@yahoo.com</a>.</p>""", elem_id='Sub-heading1 ')
329
- # # count_html = gr.HTML(value=f"""
330
- # # <div style="display: flex; justify-content: flex-end;">
331
- # # <span style="font-weight: bold; color: maroon; font-size: 18px;">No of Usages:</span>
332
- # # <span style="font-weight: bold; color: maroon; font-size: 18px;">{usage_count}</span>
333
- # # </div>
334
- # # """)
335
-
336
  chatbot = gr.Chatbot(
337
- [],
338
- elem_id="chatbot",
339
- avatar_images=('https://aui.atlassian.com/aui/8.8/docs/images/avatar-person.svg',
340
- 'https://huggingface.co/datasets/huggingface/brand-assets/resolve/main/hf-logo.svg'),
341
- bubble_full_width=False,
342
- show_copy_button=True,
343
- show_share_button=True,
344
- )
345
 
346
  with gr.Row():
347
  txt = gr.Textbox(
348
- scale=3,
349
- show_label=False,
350
- placeholder="Enter text and press enter",
351
- container=False,
352
- )
353
  txt_btn = gr.Button(value="Submit text", scale=1)
354
-
355
- cross_encoder = gr.Radio(choices=['(FAST) MiniLM-L6v2','(ACCURATE) BGE reranker','(HIGH ACCURATE) ColBERT'], value='(ACCURATE) BGE reranker',label="Embeddings", info="Only First query to Colbert may take litte time)")
356
-
 
 
 
 
 
 
 
 
 
357
  prompt_html = gr.HTML()
358
- # Turn off interactivity while generating if you click
359
- txt_msg = txt_btn.click(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
360
- bot, [chatbot, cross_encoder], [chatbot, prompt_html])#.then(update_count_html,[],[count_html])
361
 
362
- # Turn it back on
363
- txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)
364
-
365
- # Turn off interactivity while generating if you hit enter
 
366
  txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
367
- bot, [chatbot, cross_encoder], [chatbot, prompt_html])#.then(update_count_html,[],[count_html])
368
-
369
- # Turn it back on
370
- txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)
371
-
372
- # Examples
373
- gr.Examples(examples, txt)
374
-
375
-
376
-
377
 
378
- with gr.Blocks(title="Quiz Maker", theme=gr.themes.Default(primary_hue="green", secondary_hue="green"), css="style.css") as QUIZBOT:
379
- with gr.Column(scale=4):
380
- gr.HTML("""
381
- <center>
382
- <h1><span style="color: purple;">ADWITIYA</span> Customs Manual Quizbot</h1>
383
- <h2>Generative AI-powered Capacity building for Training Officers</h2>
384
- <i>⚠️ NACIN Faculties create quiz from any topic dynamically for classroom evaluation after their sessions! ⚠️</i>
385
- </center>
386
- """)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
387
 
388
- with gr.Column(scale=2):
389
- gr.HTML("""
390
- <center>
391
-
392
- <h2>Ready!</h2>
393
-
394
- </center>
395
- """)
396
- # load_btn = gr.Button("Click to Load!🚀")
397
- # load_text = gr.Textbox()
398
- # load_btn.click(fn=load_model, outputs=load_text)
399
 
400
- topic = gr.Textbox(label="Enter the Topic for Quiz", placeholder="Write any topic/details from Customs Manual")
401
-
402
- with gr.Row():
403
- radio = gr.Radio(["easy", "average", "hard"], label="How difficult should the quiz be?")
404
 
405
- generate_quiz_btn = gr.Button("Generate Quiz!🚀")
406
- quiz_msg = gr.Textbox()
 
 
 
 
 
 
 
407
 
408
- question_radios = [gr.Radio(visible=False) for _ in range(10)]
 
 
409
 
410
- generate_quiz_btn.click(
411
- fn=generate_quiz,
412
- inputs=[radio, topic],
413
- outputs=[quiz_msg] + question_radios
414
- )
415
 
416
- check_button = gr.Button("Check Score")
417
- score_textbox = gr.Markdown()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
418
 
419
- check_button.click(
420
- fn=compare_answers,
421
- inputs=question_radios,
422
- outputs=score_textbox
423
- )
424
-
425
- demo = gr.TabbedInterface([CHATBOT, QUIZBOT], ["AI ChatBot", "AI Quizbot"])
426
- demo.queue()
427
- demo.launch(debug=True)
428
 
429
- # RAG_db=gr.State()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
430
 
431
- # with gr.Blocks(title="Quiz Maker", theme=gr.themes.Default(primary_hue="green", secondary_hue="green"), css="style.css") as QUIZBOT:
432
- # def load_model():
433
- # RAG= RAGPretrainedModel.from_pretrained("colbert-ir/colbertv2.0")
434
- # RAG_db.value=RAG.from_index('.ragatouille/colbert/indexes/cbseclass10index')
435
- # return 'Ready to Go!!'
436
- # with gr.Column(scale=4):
437
- # gr.HTML("""
438
- # <center>
439
- # <h1><span style="color: purple;">ADWITIYA</span> Customs Manual Quizbot</h1>
440
- # <h2>Generative AI-powered Capacity building for Training Officers</h2>
441
- # <i>⚠️ NACIN Faculties create quiz from any topic dynamically for classroom evaluation after their sessions ! ⚠️</i>
442
- # </center>
443
- # """)
444
- # #gr.Warning('Retrieving using ColBERT.. First time query will take a minute for model to load..pls wait')
445
- # with gr.Column(scale=2):
446
- # load_btn = gr.Button("Click to Load!🚀")
447
- # load_text=gr.Textbox()
448
- # load_btn.click(load_model,[],load_text)
449
-
450
-
451
- # topic = gr.Textbox(label="Enter the Topic for Quiz", placeholder="Write any topic/details from Customs Manual")
452
 
 
 
453
  # with gr.Row():
454
- # radio = gr.Radio(
455
- # ["easy", "average", "hard"], label="How difficult should the quiz be?"
456
- # )
457
-
458
-
459
- # generate_quiz_btn = gr.Button("Generate Quiz!🚀")
460
- # quiz_msg=gr.Textbox()
461
-
462
- # question_radios = [gr.Radio(visible=False), gr.Radio(visible=False), gr.Radio(
463
- # visible=False), gr.Radio(visible=False), gr.Radio(visible=False), gr.Radio(visible=False), gr.Radio(visible=False), gr.Radio(
464
- # visible=False), gr.Radio(visible=False), gr.Radio(visible=False)]
 
 
 
 
 
 
 
 
465
 
466
- # print(question_radios)
 
467
 
468
- # @spaces.GPU
469
- # @generate_quiz_btn.click(inputs=[radio, topic], outputs=[quiz_msg]+question_radios, api_name="generate_quiz")
470
- # def generate_quiz(question_difficulty, topic):
471
- # top_k_rank=10
472
- # RAG_db_=RAG_db.value
473
- # documents_full=RAG_db_.search(topic,k=top_k_rank)
474
-
475
-
 
 
476
 
477
- # generate_kwargs = dict(
478
- # temperature=0.2,
479
- # max_new_tokens=4000,
480
- # top_p=0.95,
481
- # repetition_penalty=1.0,
482
- # do_sample=True,
483
- # seed=42,
484
- # )
485
- # question_radio_list = []
486
- # count=0
487
- # while count<=3:
488
- # try:
489
- # documents=[item['content'] for item in documents_full]
490
- # document_summaries = [f"[DOCUMENT {i+1}]: {summary}{count}" for i, summary in enumerate(documents)]
491
- # documents_str='\n'.join(document_summaries)
492
- # formatted_prompt = system_instructions(
493
- # question_difficulty, topic,documents_str)
494
- # print(formatted_prompt)
495
- # pre_prompt = [
496
- # {"role": "system", "content": formatted_prompt}
497
- # ]
498
- # response = client.text_generation(
499
- # formatted_prompt, **generate_kwargs, stream=False, details=False, return_full_text=False,
500
  # )
501
- # output_json = json.loads(f"{response}")
502
-
503
-
504
- # print(response)
505
- # print('output json', output_json)
506
-
507
- # global quiz_data
508
-
509
- # quiz_data = output_json
510
-
511
-
512
-
513
- # for question_num in range(1, 11):
514
- # question_key = f"Q{question_num}"
515
- # answer_key = f"A{question_num}"
516
-
517
- # question = quiz_data.get(question_key)
518
- # answer = quiz_data.get(quiz_data.get(answer_key))
519
-
520
- # if not question or not answer:
521
- # continue
522
-
523
- # choice_keys = [f"{question_key}:C{i}" for i in range(1, 5)]
524
- # choice_list = []
525
- # for choice_key in choice_keys:
526
- # choice = quiz_data.get(choice_key, "Choice not found")
527
- # choice_list.append(f"{choice}")
528
-
529
- # radio = gr.Radio(choices=choice_list, label=question,
530
- # visible=True, interactive=True)
531
-
532
- # question_radio_list.append(radio)
533
- # if len(question_radio_list)==10:
534
- # break
535
- # else:
536
- # print('10 questions not generated . So trying again!')
537
- # count+=1
538
- # continue
539
- # except Exception as e:
540
- # count+=1
541
- # print(f"Exception occurred: {e}")
542
- # if count==3:
543
- # print('Retry exhausted')
544
- # gr.Warning('Sorry. Pls try with another topic !')
545
- # else:
546
- # print(f"Trying again..{count} time...please wait")
547
- # continue
548
-
549
- # print('Question radio list ' , question_radio_list)
550
 
551
- # return ['Quiz Generated!']+ question_radio_list
552
 
553
- # check_button = gr.Button("Check Score")
 
 
 
554
 
555
- # score_textbox = gr.Markdown()
 
556
 
557
- # @check_button.click(inputs=question_radios, outputs=score_textbox)
558
- # def compare_answers(*user_answers):
559
- # user_anwser_list = []
560
- # user_anwser_list = user_answers
561
 
562
- # answers_list = []
 
563
 
564
- # for question_num in range(1, 20):
565
- # answer_key = f"A{question_num}"
566
- # answer = quiz_data.get(quiz_data.get(answer_key))
567
- # if not answer:
568
- # break
569
- # answers_list.append(answer)
570
 
571
- # score = 0
572
 
573
- # for item in user_anwser_list:
574
- # if item in answers_list:
575
- # score += 1
576
- # if score>5:
577
- # message = f"### Good ! You got {score} over 10!"
578
- # elif score>7:
579
- # message = f"### Excellent ! You got {score} over 10!"
580
- # else:
581
- # message = f"### You got {score} over 10! Dont worry . You can prepare well and try better next time !"
582
 
583
- # return message
584
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
585
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
586
 
587
- # demo = gr.TabbedInterface([CHATBOT,QUIZBOT], ["AI ChatBot", "AI Quizbot"])
 
 
588
 
589
- # demo.queue()
590
- # demo.launch(debug=True)
 
1
+ import requests
2
+ import gradio as gr
3
  from ragatouille import RAGPretrainedModel
 
 
 
 
 
4
  import logging
5
  from pathlib import Path
6
  from time import perf_counter
 
 
 
 
7
  from sentence_transformers import CrossEncoder
8
  from huggingface_hub import InferenceClient
9
+ from jinja2 import Environment, FileSystemLoader
10
+ import numpy as np
11
  from os import getenv
12
 
13
+ # Bhashini API translation function
14
+ api_key = '22fedab828-fff6-4562-8c1b-2a95ebdb5276'
15
+ user_id = '7e477b6801dc4ff9a70a6ac13939b1e6'
16
+
17
+ def bhashini_translate(text: str, from_code: str = "en", to_code: str = "te") -> dict:
18
+ """Translates text from source language to target language using the Bhashini API."""
19
+ url = 'https://meity-auth.ulcacontrib.org/ulca/apis/v0/model/getModelsPipeline'
20
+ headers = {
21
+ "Content-Type": "application/json",
22
+ "userID": user_id,
23
+ "ulcaApiKey": api_key
24
+ }
25
+ payload = {
26
+ "pipelineTasks": [{"taskType": "translation", "config": {"language": {"sourceLanguage": from_code, "targetLanguage": to_code}}}],
27
+ "pipelineRequestConfig": {"pipelineId": "64392f96daac500b55c543cd"}
28
+ }
29
+ response = requests.post(url, json=payload, headers=headers)
30
+
31
+ if response.status_code != 200:
32
+ return {"status_code": response.status_code, "message": "Error in translation request", "translated_content": None}
33
+
34
+ response_data = response.json()
35
+ service_id = response_data["pipelineResponseConfig"][0]["config"][0]["serviceId"]
36
+ callback_url = response_data["pipelineInferenceAPIEndPoint"]["callbackUrl"]
37
+ headers2 = {
38
+ "Content-Type": "application/json",
39
+ response_data["pipelineInferenceAPIEndPoint"]["inferenceApiKey"]["name"]: response_data["pipelineInferenceAPIEndPoint"]["inferenceApiKey"]["value"]
40
+ }
41
+ compute_payload = {
42
+ "pipelineTasks": [{"taskType": "translation", "config": {"language": {"sourceLanguage": from_code, "targetLanguage": to_code}, "serviceId": service_id}}],
43
+ "inputData": {"input": [{"source": text}], "audio": [{"audioContent": None}]}
44
+ }
45
+
46
+ compute_response = requests.post(callback_url, json=compute_payload, headers=headers2)
47
+ if compute_response.status_code != 200:
48
+ return {"status_code": compute_response.status_code, "message": "Error in translation", "translated_content": None}
49
+
50
+ compute_response_data = compute_response.json()
51
+ translated_content = compute_response_data["pipelineResponse"][0]["output"][0]["target"]
52
+
53
+ return {"status_code": 200, "message": "Translation successful", "translated_content": translated_content}
54
 
55
 
56
+ # Existing chatbot functions
57
  VECTOR_COLUMN_NAME = "vector"
58
  TEXT_COLUMN_NAME = "text"
59
  HF_TOKEN = getenv("HUGGING_FACE_HUB_TOKEN")
60
  proj_dir = Path(__file__).parent
61
+
62
  logging.basicConfig(level=logging.INFO)
63
  logger = logging.getLogger(__name__)
64
+ client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1", token=HF_TOKEN)
 
65
  env = Environment(loader=FileSystemLoader(proj_dir / 'templates'))
66
 
 
67
  template = env.get_template('template.j2')
68
  template_html = env.get_template('template_html.j2')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
69
 
70
  def add_text(history, text):
71
  history = [] if history is None else history
72
  history = history + [(text, None)]
73
  return history, gr.Textbox(value="", interactive=False)
74
 
 
75
  def bot(history, cross_encoder):
76
  top_rerank = 25
77
  top_k_rank = 20
78
  query = history[-1][0]
79
 
80
  if not query:
81
+ gr.Warning("Please submit a non-empty string as a prompt")
82
+ raise ValueError("Empty string was submitted")
83
 
84
  logger.warning('Retrieving documents...')
85
 
86
+ if cross_encoder == '(HIGH ACCURATE) ColBERT':
 
87
  gr.Warning('Retrieving using ColBERT.. First time query will take a minute for model to load..pls wait')
88
+ RAG = RAGPretrainedModel.from_pretrained("colbert-ir/colbertv2.0")
89
+ RAG_db = RAG.from_index('.ragatouille/colbert/indexes/cbseclass10index')
90
+ documents_full = RAG_db.search(query, k=top_k_rank)
91
 
92
+ documents = [item['content'] for item in documents_full]
 
93
  prompt = template.render(documents=documents, query=query)
94
  prompt_html = template_html.render(documents=documents, query=query)
95
 
 
99
  for character in generate_fn(prompt, history[:-1]):
100
  history[-1][1] = character
101
  yield history, prompt_html
 
 
102
  else:
 
103
  document_start = perf_counter()
104
 
105
  query_vec = retriever.encode(query)
 
106
  doc1 = table.search(query_vec, vector_column_name=VECTOR_COLUMN_NAME).limit(top_k_rank)
107
 
 
 
 
108
  documents = table.search(query_vec, vector_column_name=VECTOR_COLUMN_NAME).limit(top_rerank).to_list()
109
  documents = [doc[TEXT_COLUMN_NAME] for doc in documents]
110
+
 
111
  query_doc_pair = [[query, doc] for doc in documents]
112
+ if cross_encoder == '(FAST) MiniLM-L6v2':
113
+ cross_encoder1 = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2')
114
+ elif cross_encoder == '(ACCURATE) BGE reranker':
115
+ cross_encoder1 = CrossEncoder('BAAI/bge-reranker-base')
116
 
117
  cross_scores = cross_encoder1.predict(query_doc_pair)
118
  sim_scores_argsort = list(reversed(np.argsort(cross_scores)))
 
119
 
120
  documents = [documents[idx] for idx in sim_scores_argsort[:top_k_rank]]
 
121
 
122
  document_time = perf_counter() - document_start
 
123
 
 
124
  prompt = template.render(documents=documents, query=query)
125
  prompt_html = template_html.render(documents=documents, query=query)
126
 
 
128
 
129
  history[-1][1] = ""
130
  for character in generate_fn(prompt, history[:-1]):
131
+ history[-1][1] = character
132
  yield history, prompt_html
 
 
 
 
 
133
 
134
+ def translate_text(response_text, selected_language):
135
+ iso_language_codes = {
136
+ "Hindi": "hi",
137
+ "Gom": "gom",
138
+ "Kannada": "kn",
139
+ "Dogri": "doi",
140
+ "Bodo": "brx",
141
+ "Urdu": "ur",
142
+ "Tamil": "ta",
143
+ "Kashmiri": "ks",
144
+ "Assamese": "as",
145
+ "Bengali": "bn",
146
+ "Marathi": "mr",
147
+ "Sindhi": "sd",
148
+ "Maithili": "mai",
149
+ "Punjabi": "pa",
150
+ "Malayalam": "ml",
151
+ "Manipuri": "mni",
152
+ "Telugu": "te",
153
+ "Sanskrit": "sa",
154
+ "Nepali": "ne",
155
+ "Santali": "sat",
156
+ "Gujarati": "gu",
157
+ "Odia": "or"
158
+ }
159
 
160
+ to_code = iso_language_codes[selected_language]
161
+ translation = bhashini_translate(response_text, to_code=to_code)
162
+ return translation['translated_content']
163
 
164
+ # Gradio interface
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
165
# ---------------------------------------------------------------------------
# Gradio chat UI: header, chat window, reranker choice, Bhashini translation.
# ---------------------------------------------------------------------------
with gr.Blocks(theme='NoCrypt/miku') as CHATBOT:
    with gr.Row():
        with gr.Column(scale=10):
            # Banner and attribution (raw HTML for inline colour control).
            gr.HTML(value="""<div style="color: #FF4500;"><h1>ADWITIYA-</h1> <h1><span style="color: #008000">Custom Manual Chatbot and Quizbot</span></h1></div>""")
            gr.HTML(value="""<p style="font-family: sans-serif; font-size: 16px;">Using GenAI for CBIC Capacity Building - A free chat bot developed by National Customs Targeting Center using Open source LLMs for CBIC Officers</p>""")
            gr.HTML(value="""<p style="font-family: Arial, sans-serif; font-size: 14px;">Developed by NCTC,Mumbai. Suggestions may be sent to <a href="mailto:nctc-admin@gov.in" style="color: #00008B; font-style: italic;">ramyadevi1607@yahoo.com</a>.</p>""")
        with gr.Column(scale=3):
            gr.Image(value='logo.png', height=200, width=200)

    chatbot = gr.Chatbot(
        [],
        elem_id="chatbot",
        avatar_images=('https://aui.atlassian.com/aui/8.8/docs/images/avatar-person.svg',
                       'https://huggingface.co/datasets/huggingface/brand-assets/resolve/main/hf-logo.svg'),
        bubble_full_width=False,
        show_copy_button=True,
        show_share_button=True,
    )

    with gr.Row():
        txt = gr.Textbox(
            scale=3,
            show_label=False,
            placeholder="Enter text and press enter",
            container=False,
        )
        txt_btn = gr.Button(value="Submit text", scale=1)

    # Reranker selection: these choice strings must match the comparisons
    # inside bot() exactly, so do not reword the values.
    cross_encoder = gr.Radio(
        choices=['(FAST) MiniLM-L6v2', '(ACCURATE) BGE reranker', '(HIGH ACCURATE) ColBERT'],
        value='(ACCURATE) BGE reranker',
        label="Embeddings",
        info="Only First query to Colbert may take little time",
    )
    language_dropdown = gr.Dropdown(
        choices=[
            "Hindi", "Gom", "Kannada", "Dogri", "Bodo", "Urdu", "Tamil", "Kashmiri", "Assamese", "Bengali", "Marathi",
            "Sindhi", "Maithili", "Punjabi", "Malayalam", "Manipuri", "Telugu", "Sanskrit", "Nepali", "Santali",
            "Gujarati", "Odia"
        ],
        value="Hindi",  # default translation target
        label="Select Language for Translation"
    )

    prompt_html = gr.HTML()
    translated_textbox = gr.Textbox(label="Translated Response")

    # Button click: append the user turn, stream the bot answer, translate.
    # NOTE(review): translate_text is fed the *query* textbox, which add_text
    # clears before bot runs — presumably the intent was to translate the
    # bot's reply; confirm and wire the response text instead if so.
    txt_msg = txt_btn.click(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
        bot, [chatbot, cross_encoder], [chatbot, prompt_html]).then(
        translate_text, [txt, language_dropdown], translated_textbox
    )
    # add_text returns the textbox with interactive=False; re-enable it when
    # the chain completes, otherwise input stays locked after the first turn.
    txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)

    # Enter key: identical chain to the button click.
    txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
        bot, [chatbot, cross_encoder], [chatbot, prompt_html]).then(
        translate_text, [txt, language_dropdown], translated_textbox
    )
    txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)

# Enable the request queue so the generator-based bot() can stream tokens,
# then launch with a public share link.
CHATBOT.queue()
CHATBOT.launch(share=True)
220
+
221
+ # from ragatouille import RAGPretrainedModel
222
+ # import subprocess
223
+ # import json
224
+ # import spaces
225
+ # import firebase_admin
226
+ # from firebase_admin import credentials, firestore
227
+ # import logging
228
+ # from pathlib import Path
229
+ # from time import perf_counter
230
+ # from datetime import datetime
231
+ # import gradio as gr
232
+ # from jinja2 import Environment, FileSystemLoader
233
+ # import numpy as np
234
+ # from sentence_transformers import CrossEncoder
235
+ # from huggingface_hub import InferenceClient
236
+ # from os import getenv
237
+
238
+ # from backend.query_llm import generate_hf, generate_openai
239
+ # from backend.semantic_search import table, retriever
240
+ # from huggingface_hub import InferenceClient
241
+
242
+
243
+ # VECTOR_COLUMN_NAME = "vector"
244
+ # TEXT_COLUMN_NAME = "text"
245
+ # HF_TOKEN = getenv("HUGGING_FACE_HUB_TOKEN")
246
+ # proj_dir = Path(__file__).parent
247
+ # # Setting up the logging
248
+ # logging.basicConfig(level=logging.INFO)
249
+ # logger = logging.getLogger(__name__)
250
+ # client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1",token=HF_TOKEN)
251
+ # # Set up the template environment with the templates directory
252
+ # env = Environment(loader=FileSystemLoader(proj_dir / 'templates'))
253
+
254
+ # # Load the templates directly from the environment
255
+ # template = env.get_template('template.j2')
256
+ # template_html = env.get_template('template_html.j2')
257
+
258
+
259
+ # def add_text(history, text):
260
+ # history = [] if history is None else history
261
+ # history = history + [(text, None)]
262
+ # return history, gr.Textbox(value="", interactive=False)
263
+
264
+
265
+ # def bot(history, cross_encoder):
266
+ # top_rerank = 25
267
+ # top_k_rank = 20
268
+ # query = history[-1][0]
269
+
270
+ # if not query:
271
+ # gr.Warning("Please submit a non-empty string as a prompt")
272
+ # raise ValueError("Empty string was submitted")
273
+
274
+ # logger.warning('Retrieving documents...')
275
 
276
+ # # if COLBERT RAGATATOUILLE PROCEDURE :
277
+ # if cross_encoder=='(HIGH ACCURATE) ColBERT':
278
+ # gr.Warning('Retrieving using ColBERT.. First time query will take a minute for model to load..pls wait')
279
+ # RAG= RAGPretrainedModel.from_pretrained("colbert-ir/colbertv2.0")
280
+ # RAG_db=RAG.from_index('.ragatouille/colbert/indexes/cbseclass10index')
281
+ # documents_full=RAG_db.search(query,k=top_k_rank)
282
+
283
+ # documents=[item['content'] for item in documents_full]
284
+ # # Create Prompt
285
+ # prompt = template.render(documents=documents, query=query)
286
+ # prompt_html = template_html.render(documents=documents, query=query)
287
 
288
+ # generate_fn = generate_hf
 
 
 
289
 
290
+ # history[-1][1] = ""
291
+ # for character in generate_fn(prompt, history[:-1]):
292
+ # history[-1][1] = character
293
+ # yield history, prompt_html
294
+ # print('Final history is ',history)
295
+ # #store_message(db,history[-1][0],history[-1][1],cross_encoder)
296
+ # else:
297
+ # # Retrieve documents relevant to query
298
+ # document_start = perf_counter()
299
 
300
+ # query_vec = retriever.encode(query)
301
+ # logger.warning(f'Finished query vec')
302
+ # doc1 = table.search(query_vec, vector_column_name=VECTOR_COLUMN_NAME).limit(top_k_rank)
303
 
304
+
 
 
 
 
305
 
306
+ # logger.warning(f'Finished search')
307
+ # documents = table.search(query_vec, vector_column_name=VECTOR_COLUMN_NAME).limit(top_rerank).to_list()
308
+ # documents = [doc[TEXT_COLUMN_NAME] for doc in documents]
309
+ # logger.warning(f'start cross encoder {len(documents)}')
310
+ # # Retrieve documents relevant to query
311
+ # query_doc_pair = [[query, doc] for doc in documents]
312
+ # if cross_encoder=='(FAST) MiniLM-L6v2' :
313
+ # cross_encoder1 = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2')
314
+ # elif cross_encoder=='(ACCURATE) BGE reranker':
315
+ # cross_encoder1 = CrossEncoder('BAAI/bge-reranker-base')
316
+
317
+ # cross_scores = cross_encoder1.predict(query_doc_pair)
318
+ # sim_scores_argsort = list(reversed(np.argsort(cross_scores)))
319
+ # logger.warning(f'Finished cross encoder {len(documents)}')
320
+
321
+ # documents = [documents[idx] for idx in sim_scores_argsort[:top_k_rank]]
322
+ # logger.warning(f'num documents {len(documents)}')
323
+
324
+ # document_time = perf_counter() - document_start
325
+ # logger.warning(f'Finished Retrieving documents in {round(document_time, 2)} seconds...')
326
+
327
+ # # Create Prompt
328
+ # prompt = template.render(documents=documents, query=query)
329
+ # prompt_html = template_html.render(documents=documents, query=query)
330
+
331
+ # generate_fn = generate_hf
332
+
333
+ # history[-1][1] = ""
334
+ # for character in generate_fn(prompt, history[:-1]):
335
+ # history[-1][1] = character
336
+ # yield history, prompt_html
337
+ # print('Final history is ',history)
338
+ # #store_message(db,history[-1][0],history[-1][1],cross_encoder)
339
+
340
+ # # def system_instructions(question_difficulty, topic,documents_str):
341
+ # # return f"""<s> [INST] Your are a great teacher and your task is to create 10 questions with 4 choices with a {question_difficulty} difficulty about topic request " {topic} " only from the below given documents, {documents_str} then create an answers. Index in JSON format, the questions as "Q#":"" to "Q#":"", the four choices as "Q#:C1":"" to "Q#:C4":"", and the answers as "A#":"Q#:C#" to "A#":"Q#:C#". [/INST]"""
342
+
343
+ # RAG_db = gr.State()
344
+
345
+ # # def load_model():
346
+ # # try:
347
+ # # # Initialize the model
348
+ # # RAG = RAGPretrainedModel.from_pretrained("colbert-ir/colbertv2.0")
349
+ # # # Load the RAG database
350
+ # # RAG_db.value = RAG.from_index('.ragatouille/colbert/indexes/cbseclass10index')
351
+ # # return 'Ready to Go!!'
352
+ # # except Exception as e:
353
+ # # return f"Error loading model: {e}"
354
 
 
 
 
 
 
 
 
 
 
355
 
356
+ # # def generate_quiz(question_difficulty, topic):
357
+ # # if not topic.strip():
358
+ # # return ['Please enter a valid topic.'] + [gr.Radio(visible=False) for _ in range(10)]
359
+
360
+ # # top_k_rank = 10
361
+ # # # Load the model and database within the generate_quiz function
362
+ # # try:
363
+ # # RAG = RAGPretrainedModel.from_pretrained("colbert-ir/colbertv2.0")
364
+ # # RAG_db_ = RAG.from_index('.ragatouille/colbert/indexes/cbseclass10index')
365
+ # # gr.Warning('Model loaded!')
366
+ # # except Exception as e:
367
+ # # return [f"Error loading model: {e}"] + [gr.Radio(visible=False) for _ in range(10)]
368
+
369
+ # # RAG_db_ = RAG_db.value
370
+ # # documents_full = RAG_db_.search(topic, k=top_k_rank)
371
+
372
+ # # generate_kwargs = dict(
373
+ # # temperature=0.2,
374
+ # # max_new_tokens=4000,
375
+ # # top_p=0.95,
376
+ # # repetition_penalty=1.0,
377
+ # # do_sample=True,
378
+ # # seed=42,
379
+ # # )
380
+
381
+ # # question_radio_list = []
382
+ # # count = 0
383
+ # # while count <= 3:
384
+ # # try:
385
+ # # documents = [item['content'] for item in documents_full]
386
+ # # document_summaries = [f"[DOCUMENT {i+1}]: {summary}{count}" for i, summary in enumerate(documents)]
387
+ # # documents_str = '\n'.join(document_summaries)
388
+ # # formatted_prompt = system_instructions(question_difficulty, topic, documents_str)
389
+
390
+ # # pre_prompt = [
391
+ # # {"role": "system", "content": formatted_prompt}
392
+ # # ]
393
+ # # response = client.text_generation(
394
+ # # formatted_prompt, **generate_kwargs, stream=False, details=False, return_full_text=False,
395
+ # # )
396
+ # # output_json = json.loads(f"{response}")
397
+
398
+ # # global quiz_data
399
+ # # quiz_data = output_json
400
+
401
+ # # for question_num in range(1, 11):
402
+ # # question_key = f"Q{question_num}"
403
+ # # answer_key = f"A{question_num}"
404
+ # # question = quiz_data.get(question_key)
405
+ # # answer = quiz_data.get(quiz_data.get(answer_key))
406
+
407
+ # # if not question or not answer:
408
+ # # continue
409
+
410
+ # # choice_keys = [f"{question_key}:C{i}" for i in range(1, 5)]
411
+ # # choice_list = [quiz_data.get(choice_key, "Choice not found") for choice_key in choice_keys]
412
+
413
+ # # radio = gr.Radio(choices=choice_list, label=question, visible=True, interactive=True)
414
+ # # question_radio_list.append(radio)
415
+
416
+ # # if len(question_radio_list) == 10:
417
+ # # break
418
+ # # else:
419
+ # # count += 1
420
+ # # continue
421
+ # # except Exception as e:
422
+ # # count += 1
423
+ # # if count == 3:
424
+ # # return ['Sorry. Pls try with another topic!'] + [gr.Radio(visible=False) for _ in range(10)]
425
+ # # continue
426
+
427
+ # # return ['Quiz Generated!'] + question_radio_list
428
 
429
+ # # def compare_answers(*user_answers):
430
+ # # user_answer_list = user_answers
431
+ # # answers_list = [quiz_data.get(quiz_data.get(f"A{question_num}")) for question_num in range(1, 11)]
432
+
433
+ # # score = sum(1 for answer in user_answer_list if answer in answers_list)
434
+
435
+ # # if score > 7:
436
+ # # message = f"### Excellent! You got {score} out of 10!"
437
+ # # elif score > 5:
438
+ # # message = f"### Good! You got {score} out of 10!"
439
+ # # else:
440
+ # # message = f"### You got {score} out of 10! Don’t worry, you can prepare well and try better next time!"
441
+
442
+ # # return message
 
 
 
 
 
 
 
443
 
444
+ # #with gr.Blocks(theme='Insuz/SimpleIndigo') as demo:
445
+ # with gr.Blocks(theme='NoCrypt/miku') as CHATBOT:
446
  # with gr.Row():
447
+ # with gr.Column(scale=10):
448
+ # # gr.Markdown(
449
+ # # """
450
+ # # # Theme preview: `paris`
451
+ # # To use this theme, set `theme='earneleh/paris'` in `gr.Blocks()` or `gr.Interface()`.
452
+ # # You can append an `@` and a semantic version expression, e.g. @>=1.0.0,<2.0.0 to pin to a given version
453
+ # # of this theme.
454
+ # # """
455
+ # # )
456
+ # gr.HTML(value="""<div style="color: #FF4500;"><h1>ADWITIYA-</h1> <h1><span style="color: #008000">Custom Manual Chatbot and Quizbot</span></h1>
457
+ # </div>""", elem_id='heading')
458
+
459
+ # gr.HTML(value=f"""
460
+ # <p style="font-family: sans-serif; font-size: 16px;">
461
+ # Using GenAI for CBIC Capacity Building - A free chat bot developed by National Customs Targeting Center using Open source LLMs for CBIC Officers
462
+ # </p>
463
+ # """, elem_id='Sub-heading')
464
+ # #usage_count = get_and_increment_value_count(db,collection_name, field_name)
465
+ # gr.HTML(value=f"""<p style="font-family: Arial, sans-serif; font-size: 14px;">Developed by NCTC,Mumbai . Suggestions may be sent to <a href="mailto:nctc-admin@gov.in" style="color: #00008B; font-style: italic;">ramyadevi1607@yahoo.com</a>.</p>""", elem_id='Sub-heading1 ')
466
 
467
+ # with gr.Column(scale=3):
468
+ # gr.Image(value='logo.png',height=200,width=200)
469
 
470
+
471
+ # chatbot = gr.Chatbot(
472
+ # [],
473
+ # elem_id="chatbot",
474
+ # avatar_images=('https://aui.atlassian.com/aui/8.8/docs/images/avatar-person.svg',
475
+ # 'https://huggingface.co/datasets/huggingface/brand-assets/resolve/main/hf-logo.svg'),
476
+ # bubble_full_width=False,
477
+ # show_copy_button=True,
478
+ # show_share_button=True,
479
+ # )
480
 
481
+ # with gr.Row():
482
+ # txt = gr.Textbox(
483
+ # scale=3,
484
+ # show_label=False,
485
+ # placeholder="Enter text and press enter",
486
+ # container=False,
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
487
  # )
488
+ # txt_btn = gr.Button(value="Submit text", scale=1)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
489
 
490
+ # cross_encoder = gr.Radio(choices=['(FAST) MiniLM-L6v2','(ACCURATE) BGE reranker','(HIGH ACCURATE) ColBERT'], value='(ACCURATE) BGE reranker',label="Embeddings", info="Only First query to Colbert may take litte time)")
491
 
492
+ # prompt_html = gr.HTML()
493
+ # # Turn off interactivity while generating if you click
494
+ # txt_msg = txt_btn.click(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
495
+ # bot, [chatbot, cross_encoder], [chatbot, prompt_html])#.then(update_count_html,[],[count_html])
496
 
497
+ # # Turn it back on
498
+ # txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)
499
 
500
+ # # Turn off interactivity while generating if you hit enter
501
+ # txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
502
+ # bot, [chatbot, cross_encoder], [chatbot, prompt_html])#.then(update_count_html,[],[count_html])
 
503
 
504
+ # # Turn it back on
505
+ # txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)
506
 
507
+ # # Examples
508
+ # gr.Examples(examples, txt)
 
 
 
 
509
 
 
510
 
 
 
 
 
 
 
 
 
 
511
 
 
512
 
513
+ # # with gr.Blocks(title="Quiz Maker", theme=gr.themes.Default(primary_hue="green", secondary_hue="green"), css="style.css") as QUIZBOT:
514
+ # # with gr.Column(scale=4):
515
+ # # gr.HTML("""
516
+ # # <center>
517
+ # # <h1><span style="color: purple;">ADWITIYA</span> Customs Manual Quizbot</h1>
518
+ # # <h2>Generative AI-powered Capacity building for Training Officers</h2>
519
+ # # <i>⚠️ NACIN Faculties create quiz from any topic dynamically for classroom evaluation after their sessions! ⚠️</i>
520
+ # # </center>
521
+ # # """)
522
+
523
+ # # with gr.Column(scale=2):
524
+ # # gr.HTML("""
525
+ # # <center>
526
+
527
+ # # <h2>Ready!</h2>
528
+
529
+ # # </center>
530
+ # # """)
531
+ # # # load_btn = gr.Button("Click to Load!🚀")
532
+ # # # load_text = gr.Textbox()
533
+ # # # load_btn.click(fn=load_model, outputs=load_text)
534
+
535
+ # # topic = gr.Textbox(label="Enter the Topic for Quiz", placeholder="Write any topic/details from Customs Manual")
536
 
537
+ # # with gr.Row():
538
+ # # radio = gr.Radio(["easy", "average", "hard"], label="How difficult should the quiz be?")
539
+
540
+ # # generate_quiz_btn = gr.Button("Generate Quiz!🚀")
541
+ # # quiz_msg = gr.Textbox()
542
+
543
+ # # question_radios = [gr.Radio(visible=False) for _ in range(10)]
544
+
545
+ # # generate_quiz_btn.click(
546
+ # # fn=generate_quiz,
547
+ # # inputs=[radio, topic],
548
+ # # outputs=[quiz_msg] + question_radios
549
+ # # )
550
+
551
+ # # check_button = gr.Button("Check Score")
552
+ # # score_textbox = gr.Markdown()
553
+
554
+ # # check_button.click(
555
+ # # fn=compare_answers,
556
+ # # inputs=question_radios,
557
+ # # outputs=score_textbox
558
+ # # )
559
 
560
+ # #demo = gr.TabbedInterface([CHATBOT, QUIZBOT], ["AI ChatBot", "AI Quizbot"])
561
+ # CHATBOT.queue()
562
+ # CHATBOT.launch(debug=True)
563