Rodolfo Torres committed
Commit 971a7ea · 1 Parent(s): c0c69df

Code adjustments, documentation, and license inclusion.

Files changed (3)
  1. main.py +334 -71
  2. static/index.html +18 -0
  3. static/js/app.js +312 -118
main.py CHANGED
@@ -1,9 +1,27 @@
1
  import torch
2
 
 
 
3
  try:
4
  import intel_extension_for_pytorch as ipex
5
  ipex_enabled = True
6
  except:
 
7
  ipex_enabled = False
8
 
9
  import time
@@ -19,46 +37,302 @@ from fastapi.responses import JSONResponse
19
  from io import BytesIO
20
  import PyPDF2
21
  from newspaper import Article
22
- from transformers import AutoModelForMultipleChoice, AutoTokenizer, AutoModelForQuestionAnswering
23
 
24
- qa_pipeline = pipeline("question-answering", model="roaltopo/scan-u-doc_question-answer")
25
- bool_q_pipeline = pipeline("text-classification", model="roaltopo/scan-u-doc_bool-question")
26
- model_path = "roaltopo/scan-u-doc_bool-answer"
27
- bool_a_tokenizer = AutoTokenizer.from_pretrained(model_path)
28
- bool_a_model = AutoModelForMultipleChoice.from_pretrained(model_path)
29
30
  app = FastAPI()
31
 
32
- # In-memory dictionary for storing information
33
  text_storage = {}
34
 
35
  class TextInfo(BaseModel):
36
  text: Optional[str] = None
37
  pdf: Optional[bytes] = None
38
  html_url: Optional[str] = None
39
 
40
  class QuestionInfo(BaseModel):
41
  question: str
42
  allow_bool: Optional[bool] = False
43
 
44
- def predict_boolean_answer(text, question):
45
- id2label = {0: "NO", 1: "YES"}
46
  text += '\n'
47
  question += '\n'
48
- inputs = bool_a_tokenizer([[text, question+'no'], [text, question+'yes']], return_tensors="pt", padding=True)
 
 
49
  labels = torch.tensor(0).unsqueeze(0)
50
 
51
- outputs = bool_a_model(**{k: v.unsqueeze(0) for k, v in inputs.items()}, labels=labels)
 
52
  logits = outputs.logits
53
 
 
54
  return {'answer': id2label[int(logits.argmax().item())]}
55
56
  @app.post("/store_text/{uuid}")
57
  async def store_text(uuid: str, text_info: TextInfo):
58
  try:
59
  url = text_info.html_url.strip() if text_info.html_url else None
60
  if url:
61
- print('url:', url)
62
  article = Article(url)
63
  article.download()
64
  article.parse()
@@ -80,9 +354,19 @@ async def store_text(uuid: str, text_info: TextInfo):
80
  print(error_message)
81
  raise HTTPException(status_code=500, detail="Internal Server Error: An unexpected error occurred.")
82
 
83
- # Route to upload a file
84
  @app.post("/upload_file/{uuid}")
85
  async def upload_file(uuid: str, file: UploadFile = File(...)):
86
  try:
87
  file_extension = file.filename.split('.')[-1].lower()
88
 
@@ -120,90 +404,69 @@ async def upload_file(uuid: str, file: UploadFile = File(...)):
120
  except Exception as e:
121
  return JSONResponse(content={"message": f"Error while uploading the file: {e}"}, status_code=500)
122
 
 
123
  @app.post("/answer_question/{uuid}")
124
  async def answer_question(uuid: str, question_info: QuestionInfo):
125
  bool_activate = question_info.allow_bool
126
 
127
  question = question_info.question
128
 
129
- # Check whether text with this ID exists in the dictionary
130
  if uuid not in text_storage:
131
  return {'error': 'Text not found'}
132
 
133
  answer = qa_pipeline(question=question, context=text_storage[uuid]['text'])
134
- if bool_activate :
135
  is_bool_inference = bool_q_pipeline(question)
136
- if is_bool_inference[0]['label'] == 'YES' :
137
  answer = predict_boolean_answer(answer['answer'], question)
138
 
139
  return answer
140
 
141
- ############
142
- def get_score1(model_checkpoint, question, context, num_times, warmup_rounds, has_xpu):
143
- tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
144
- model = AutoModelForQuestionAnswering.from_pretrained(model_checkpoint)
145
- model.eval()
146
-
147
- if has_xpu:
148
- device = 'xpu'
149
- else :
150
- device = None
151
-
152
- qa_pipeline = pipeline("question-answering", model=model, tokenizer=tokenizer, torch_dtype=torch.bfloat16, device=device) #, torch_dtype=torch.bfloat16
153
- latency_list = []
154
- for i in range(num_times):
155
- time_start = time.time()
156
- answer = qa_pipeline(question=question, context=context)
157
- if i >= warmup_rounds:
158
- latency_list.append(time.time() - time_start)
159
- pipeline_inference_time = np.mean(latency_list)
160
- return pipeline_inference_time
161
-
162
- def get_score2(model_checkpoint, question, context, num_times, warmup_rounds, has_xpu):
163
- tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
164
- model = AutoModelForQuestionAnswering.from_pretrained(model_checkpoint)
165
- model.eval()
166
-
167
- if has_xpu:
168
- device = 'xpu'
169
- else :
170
- device = None
171
-
172
- if ipex_enabled:
173
- #################### code changes ####################
174
- model = ipex.optimize(model, weights_prepack=False)
175
- model = torch.compile(model, backend="ipex")
176
- ######################################################
177
- with torch.no_grad():
178
- qa_pipeline = pipeline("question-answering", model=model, tokenizer=tokenizer, torch_dtype=torch.bfloat16, device=device) #, torch_dtype=torch.bfloat16
179
- latency_list = []
180
- for i in range(num_times):
181
- time_start = time.time()
182
- answer = qa_pipeline(question=question, context=context)
183
- if i >= warmup_rounds:
184
- latency_list.append(time.time() - time_start)
185
- pipeline_inference_time = np.mean(latency_list)
186
- return pipeline_inference_time
187
 
188
  @app.get("/benchmark")
189
- async def benchmark(question: str, context: str):
190
- num_times = 50
191
- warmup_rounds = 20
192
 
193
- model_checkpoint = "roaltopo/scan-u-doc_question-answer"
194
195
 
196
- has_xpu = torch.xpu.device_count()
197
- score1 = get_score1(model_checkpoint, question, context, num_times, warmup_rounds, has_xpu)
198
- score2 = get_score2(model_checkpoint, question, context, num_times, warmup_rounds, has_xpu)
199
 
200
- return {'has_xpu': has_xpu, 'ipex_enabled': ipex_enabled,'score1': score1, 'score2': score2}
201
 
202
- ############
203
 
204
 
205
  app.mount("/", StaticFiles(directory="static", html=True), name="static")
206
 
207
  @app.get("/")
208
  def index() -> FileResponse:
209
  return FileResponse(path="/app/static/index.html", media_type="text/html")
 
 
1
+ """
2
+ # Main module for the ScanUDoc application, containing various endpoints for text processing and benchmarking.
3
+
4
+ # Author: Rodolfo Torres
5
+ # Email: rodolfo.torres@outlook.com
6
+ # LinkedIn: https://www.linkedin.com/in/rodolfo-torres-p
7
+
8
+ # This module includes endpoints for text processing, benchmarking of different pipelines, and handling file uploads.
9
+ # The code is licensed under the GPL-3.0 license, which is a widely used open-source license, ensuring that any derivative work is also open source.
10
+ # It grants users the freedom to use, modify, and distribute the software, as well as any modifications or extensions made to it.
11
+ # However, any modified versions of the software must also be licensed under GPL-3.0.
12
+
13
+ # For more details, please refer to the full text of the GPL-3.0 license at https://www.gnu.org/licenses/gpl-3.0.html.
14
+ """
15
+
16
  import torch
17
 
18
+ # Attempt to import the Intel Extension for PyTorch module.
19
+ # Set the 'ipex_enabled' flag accordingly to indicate if the import was successful.
20
  try:
21
  import intel_extension_for_pytorch as ipex
22
  ipex_enabled = True
23
  except:
24
+ # If the import fails, set 'ipex_enabled' to False.
25
  ipex_enabled = False
26
 
27
  import time
 
37
  from io import BytesIO
38
  import PyPDF2
39
  from newspaper import Article
40
+ from transformers import AutoModelForMultipleChoice, AutoTokenizer, AutoModelForQuestionAnswering, AutoModelForSequenceClassification
41
+
42
+ try:
43
+ # Check if there is any XPU (any accelerator device) available with PyTorch.
44
+ has_xpu = torch.xpu.device_count()
45
+ except:
46
+ # If there is an error during the device count check, set 'has_xpu' to False.
47
+ has_xpu = False
48
+
49
+ def get_qa_pipeline(optimize=True):
50
+ """
51
+ Creates a question-answering pipeline using a pre-trained model and tokenizer. Optionally applies Intel PyTorch Extension optimizations.
52
 
53
+ Parameters:
54
+ - optimize (bool): A flag indicating whether to apply Intel PyTorch Extension optimizations. Default is True.
 
 
 
55
 
56
+ Returns:
57
+ - qa_pipeline: A pipeline for question-answering using the specified model and tokenizer.
58
+ """
59
+
60
+ # Define the model checkpoint for the question-answering pipeline.
61
+ model_checkpoint = "roaltopo/scan-u-doc_question-answer"
62
+
63
+ # Initialize the tokenizer and the model for question-answering based on the specified checkpoint.
64
+ tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
65
+ model = AutoModelForQuestionAnswering.from_pretrained(model_checkpoint)
66
+ model.eval()
67
+
68
+ # Determine the device based on the availability of an XPU and the 'ipex_enabled' flag.
69
+ if has_xpu:
70
+ device = 'xpu'
71
+ else:
72
+ device = None
73
+
74
+ if ipex_enabled and optimize:
75
+ # Apply Intel PyTorch Extension optimizations if 'ipex_enabled' and 'optimize' are both True.
76
+ model = ipex.optimize(model, weights_prepack=False)
77
+ model = torch.compile(model, backend="ipex")
78
+
79
+ # Use 'torch.no_grad()' to ensure that no gradient calculations are performed during inference.
80
+ with torch.no_grad():
81
+ # Create a question-answering pipeline using the specified model and tokenizer.
82
+ # Set the torch data type to 'torch.bfloat16' and the device according to the determined value.
83
+ qa_pipeline = pipeline("question-answering", model=model, tokenizer=tokenizer, torch_dtype=torch.bfloat16, device=device)
84
+ return qa_pipeline
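For reference, a minimal smoke test of the pipeline returned above. The question and context strings are made-up examples, and the snippet assumes it runs where get_qa_pipeline is importable:

# Hypothetical check: the extractive QA pipeline returns a dict with 'answer' and 'score'.
qa = get_qa_pipeline(optimize=False)  # skip the IPEX compilation path for a quick test
result = qa(
    question="Who wrote the report?",
    context="The annual report was written by the finance team in March.",
)
print(result["answer"], result["score"])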
85
+
86
+
87
+ def get_bool_q_pipeline(optimize=True):
88
+ """
89
+ Creates a pipeline for text classification for boolean questions using a pre-trained model and tokenizer.
90
+ Optionally applies Intel PyTorch Extension optimizations.
91
+
92
+ Parameters:
93
+ - optimize (bool): A flag indicating whether to apply Intel PyTorch Extension optimizations. Default is True.
94
+
95
+ Returns:
96
+ - bool_q_pipeline: A pipeline for text classification for boolean questions using the specified model and tokenizer.
97
+ """
98
+ # Define the model checkpoint for the boolean question pipeline.
99
+ model_checkpoint = "roaltopo/scan-u-doc_bool-question"
100
+
101
+ # Initialize the tokenizer and the model for text classification based on the specified checkpoint.
102
+ tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
103
+ model = AutoModelForSequenceClassification.from_pretrained(model_checkpoint)
104
+ model.eval()
105
+
106
+ # Determine the device based on the availability of an XPU and the 'ipex_enabled' flag.
107
+ if has_xpu:
108
+ device = 'xpu'
109
+ else:
110
+ device = None
111
+
112
+ if ipex_enabled and optimize:
113
+ # Apply Intel PyTorch Extension optimizations if 'ipex_enabled' and 'optimize' are both True.
114
+ model = ipex.optimize(model, weights_prepack=False)
115
+ model = torch.compile(model, backend="ipex")
116
+
117
+ # Use 'torch.no_grad()' to ensure that no gradient calculations are performed during inference.
118
+ with torch.no_grad():
119
+ # Create a text classification pipeline for boolean questions using the specified model and tokenizer.
120
+ # Set the torch data type to 'torch.bfloat16' and the device according to the determined value.
121
+ bool_q_pipeline = pipeline("text-classification", model=model, tokenizer=tokenizer, torch_dtype=torch.bfloat16, device=device)
122
+ return bool_q_pipeline
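As a sketch of how this classifier is consumed later in the module (the example question is invented), the first prediction's label is compared against 'YES':

# Hypothetical usage: text-classification pipelines return a list of {'label', 'score'} dicts.
bool_q = get_bool_q_pipeline(optimize=False)
pred = bool_q("Is the warranty still valid?")
if pred[0]["label"] == "YES":
    print("Treated as a boolean question, score:", pred[0]["score"])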
123
+
124
+
125
+ def get_bool_a_model(optimize=True):
126
+ """
127
+ Retrieves the pre-trained model and tokenizer for answering boolean questions.
128
+ Optionally applies Intel PyTorch Extension optimizations.
129
+
130
+ Parameters:
131
+ - optimize (bool): A flag indicating whether to apply Intel PyTorch Extension optimizations. Default is True.
132
+
133
+ Returns:
134
+ - model: The pre-trained model for answering boolean questions.
135
+ - tokenizer: The tokenizer corresponding to the model.
136
+ """
137
+ # Define the model checkpoint for the boolean answer model.
138
+ model_checkpoint = "roaltopo/scan-u-doc_bool-answer"
139
+
140
+ # Initialize the model and the tokenizer for multiple-choice answers based on the specified checkpoint.
141
+ model = AutoModelForMultipleChoice.from_pretrained(model_checkpoint)
142
+ tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
143
+
144
+ if has_xpu:
145
+ # If an XPU is available, move the model to the XPU device.
146
+ model = model.to("xpu")
147
+
148
+ model.eval()
149
+
150
+ if ipex_enabled and optimize:
151
+ # Apply Intel PyTorch Extension optimizations if 'ipex_enabled' and 'optimize' are both True.
152
+ model = ipex.optimize(model, weights_prepack=False)
153
+ model = torch.compile(model, backend="ipex")
154
+ return model, tokenizer
155
+
156
+
157
+ # Initialize the question-answering pipeline using the 'get_qa_pipeline' function.
158
+ qa_pipeline = get_qa_pipeline()
159
+ # Initialize the pipeline for text classification for boolean questions using the 'get_bool_q_pipeline' function.
160
+ bool_q_pipeline = get_bool_q_pipeline()
161
+ # Retrieve the model and tokenizer for answering boolean questions using the 'get_bool_a_model' function.
162
+ bool_a_model, bool_a_tokenizer = get_bool_a_model()
163
+
164
+ # Initialize the FastAPI application.
165
  app = FastAPI()
166
 
167
+ # In-memory dictionary for storing information during runtime.
168
  text_storage = {}
169
 
170
  class TextInfo(BaseModel):
171
+ """
172
+ A Pydantic Base model representing information related to text data.
173
+
174
+ Attributes:
175
+ - text (str): Optional. The text data to be processed.
176
+ - pdf (bytes): Optional. The PDF data to be processed.
177
+ - html_url (str): Optional. The URL pointing to the HTML content to be processed.
178
+ """
179
  text: Optional[str] = None
180
  pdf: Optional[bytes] = None
181
  html_url: Optional[str] = None
182
 
183
  class QuestionInfo(BaseModel):
184
+ """
185
+ A Pydantic Base model representing information related to a specific question.
186
+
187
+ Attributes:
188
+ - question (str): The question to be answered or classified.
189
+ - allow_bool (bool): Optional. Flag indicating whether to allow boolean question types. Default is False.
190
+ """
191
  question: str
192
  allow_bool: Optional[bool] = False
193
 
194
+
195
+ def predict_boolean_answer(text, question, model=bool_a_model, tokenizer=bool_a_tokenizer):
196
+ """
197
+ Predicts a boolean answer for the given text and question using the specified model and tokenizer.
198
+
199
+ Parameters:
200
+ - text (str): The text data for context.
201
+ - question (str): The question to be answered.
202
+ - model: The pre-trained model for answering boolean questions. Default is 'bool_a_model'.
203
+ - tokenizer: The tokenizer corresponding to the model. Default is 'bool_a_tokenizer'.
204
+
205
+ Returns:
206
+ - dict: A dictionary containing the predicted boolean answer.
207
+ """
208
+ # Mapping for converting predicted labels to human-readable answers.
209
+ id2label = {0: "No", 1: "Yes"}
210
  text += '\n'
211
  question += '\n'
212
+
213
+ # Tokenize the text and question inputs for the model.
214
+ inputs = tokenizer([[text, question+'no'], [text, question+'yes']], return_tensors="pt", padding=True)
215
  labels = torch.tensor(0).unsqueeze(0)
216
+
217
+ if has_xpu:
218
+ # If an XPU is available, move the inputs and labels to the XPU device.
219
+ inputs = inputs.to("xpu")
220
+ labels = labels.to("xpu")
221
 
222
+ # Perform the forward pass with the model to get the outputs and logits.
223
+ outputs = model(**{k: v.unsqueeze(0) for k, v in inputs.items()}, labels=labels)
224
  logits = outputs.logits
225
 
226
+ # Return the predicted boolean answer in a dictionary format.
227
  return {'answer': id2label[int(logits.argmax().item())]}
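A minimal usage sketch of the function above, assuming the surrounding module has been loaded; the text and question are made up:

# Hypothetical call: the first argument is the context/answer span, the second the yes/no question.
result = predict_boolean_answer(
    "The library is open on Saturdays and Sundays.",
    "Is the library open on the weekend?",
)
print(result)  # expected shape: {'answer': 'Yes'} or {'answer': 'No'}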
228
 
229
+
230
+ def get_qa_score(question, context, optimize, num_times, warmup_rounds):
231
+ """
232
+ Calculates the average inference time for the question-answering pipeline.
233
+
234
+ Parameters:
235
+ - question (str): The question to be answered.
236
+ - context (str): The context for the question.
237
+ - optimize (bool): A flag indicating whether to apply optimizations to the pipeline.
238
+ - num_times (int): The number of times the inference is run to calculate the average time.
239
+ - warmup_rounds (int): The number of initial rounds to be ignored for calculating the average time.
240
+
241
+ Returns:
242
+ - pipeline_inference_time: The average inference time for the question-answering pipeline.
243
+ """
244
+ if optimize:
245
+ pipeline = qa_pipeline
246
+ else:
247
+ pipeline = get_qa_pipeline(optimize=False)
248
+
249
+ with torch.no_grad():
250
+ latency_list = []
251
+ for i in range(num_times):
252
+ time_start = time.time()
253
+ answer = pipeline(question=question, context=context)
254
+ if i >= warmup_rounds:
255
+ latency_list.append(time.time() - time_start)
256
+ pipeline_inference_time = np.mean(latency_list)
257
+ return pipeline_inference_time
258
+
259
+
260
+ def get_bool_q_score(question, optimize, num_times, warmup_rounds):
261
+ """
262
+ Calculates the average inference time for the text classification pipeline for boolean questions.
263
+
264
+ Parameters:
265
+ - question (str): The question to be classified.
266
+ - optimize (bool): A flag indicating whether to apply optimizations to the pipeline.
267
+ - num_times (int): The number of times the inference is run to calculate the average time.
268
+ - warmup_rounds (int): The number of initial rounds to be ignored for calculating the average time.
269
+
270
+ Returns:
271
+ - pipeline_inference_time: The average inference time for the text classification pipeline for boolean questions.
272
+ """
273
+ if optimize:
274
+ pipeline = bool_q_pipeline
275
+ else:
276
+ pipeline = get_bool_q_pipeline(optimize=False)
277
+
278
+ with torch.no_grad():
279
+ latency_list = []
280
+ for i in range(num_times):
281
+ time_start = time.time()
282
+ answer = pipeline(question)
283
+ if i >= warmup_rounds:
284
+ latency_list.append(time.time() - time_start)
285
+ pipeline_inference_time = np.mean(latency_list)
286
+ return pipeline_inference_time
287
+
288
+
289
+ def get_bool_a_score(text, question, optimize, num_times, warmup_rounds):
290
+ """
291
+ Calculates the average inference time for answering boolean questions.
292
+
293
+ Parameters:
294
+ - text (str): The text data for context.
295
+ - question (str): The question to be answered.
296
+ - optimize (bool): A flag indicating whether to apply optimizations to the pipeline.
297
+ - num_times (int): The number of times the inference is run to calculate the average time.
298
+ - warmup_rounds (int): The number of initial rounds to be ignored for calculating the average time.
299
+
300
+ Returns:
301
+ - pipeline_inference_time: The average inference time for answering boolean questions.
302
+ """
303
+ if not optimize:
304
+ model, tokenizer = get_bool_a_model(optimize=optimize)
305
+ else:
306
+ model = bool_a_model
307
+ tokenizer = bool_a_tokenizer
308
+
309
+ with torch.no_grad():
310
+ latency_list = []
311
+ for i in range(num_times):
312
+ time_start = time.time()
313
+ answer = predict_boolean_answer(text, question, model=model, tokenizer=tokenizer)
314
+ if i >= warmup_rounds:
315
+ latency_list.append(time.time() - time_start)
316
+ pipeline_inference_time = np.mean(latency_list)
317
+ return pipeline_inference_time
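The three *_score helpers above share the same warmup-then-average pattern. Purely as an illustrative refactoring sketch (the name average_latency is not part of the module), the timing logic could be factored out like this:

import time
import numpy as np

def average_latency(fn, num_times, warmup_rounds):
    # Call fn() num_times and average the latencies measured after the warmup rounds.
    latencies = []
    for i in range(num_times):
        start = time.time()
        fn()
        if i >= warmup_rounds:
            latencies.append(time.time() - start)
    return float(np.mean(latencies))

# e.g. average_latency(lambda: qa_pipeline(question=q, context=c), num_times=50, warmup_rounds=20)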
318
+
319
+
320
+
321
  @app.post("/store_text/{uuid}")
322
  async def store_text(uuid: str, text_info: TextInfo):
323
+ """
324
+ Stores text data in the in-memory dictionary using the provided UUID.
325
+
326
+ Parameters:
327
+ - uuid (str): The unique identifier for the stored text data.
328
+ - text_info (TextInfo): A Pydantic Base model containing information related to the text data.
329
+
330
+ Returns:
331
+ - dict: A dictionary indicating the success of the storing operation.
332
+ """
333
  try:
334
  url = text_info.html_url.strip() if text_info.html_url else None
335
  if url:
 
336
  article = Article(url)
337
  article.download()
338
  article.parse()
 
354
  print(error_message)
355
  raise HTTPException(status_code=500, detail="Internal Server Error: An unexpected error occurred.")
356
 
357
+
358
  @app.post("/upload_file/{uuid}")
359
  async def upload_file(uuid: str, file: UploadFile = File(...)):
360
+ """
361
+ Uploads a file and extracts text content to be stored in the in-memory dictionary using the provided UUID.
362
+
363
+ Parameters:
364
+ - uuid (str): The unique identifier for the stored text data.
365
+ - file (UploadFile): The file to be uploaded.
366
+
367
+ Returns:
368
+ - JSONResponse: A JSON response indicating the success or failure of the file upload and text extraction process.
369
+ """
370
  try:
371
  file_extension = file.filename.split('.')[-1].lower()
372
 
 
404
  except Exception as e:
405
  return JSONResponse(content={"message": f"Error while uploading the file: {e}"}, status_code=500)
406
 
407
+
408
  @app.post("/answer_question/{uuid}")
409
  async def answer_question(uuid: str, question_info: QuestionInfo):
410
+ """
411
+ Answers a given question based on the stored text corresponding to the provided UUID.
412
+
413
+ Parameters:
414
+ - uuid (str): The unique identifier for the stored text data.
415
+ - question_info (QuestionInfo): A Pydantic Base model containing information related to the question.
416
+
417
+ Returns:
418
+ - dict: A dictionary containing the answer to the question.
419
+ """
420
  bool_activate = question_info.allow_bool
421
 
422
  question = question_info.question
423
 
424
+ # Verify if the text with the ID exists in the dictionary
425
  if uuid not in text_storage:
426
  return {'error': 'Text not found'}
427
 
428
  answer = qa_pipeline(question=question, context=text_storage[uuid]['text'])
429
+ if bool_activate:
430
  is_bool_inference = bool_q_pipeline(question)
431
+ if is_bool_inference[0]['label'] == 'YES':
432
  answer = predict_boolean_answer(answer['answer'], question)
433
 
434
  return answer
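For illustration, a hedged client-side sketch of the flow these two endpoints expect, assuming the service is reachable at http://localhost:8000 (host, port, and the example payloads are assumptions, not part of the commit):

import uuid as uuid_lib
import requests

doc_id = str(uuid_lib.uuid4())
# Store a document by URL (TextInfo also accepts raw 'text' or 'pdf' bytes).
requests.post(
    f"http://localhost:8000/store_text/{doc_id}",
    json={"html_url": "https://example.com/article"},
)
# Ask a question against the stored text; allow_bool enables the yes/no path.
answer = requests.post(
    f"http://localhost:8000/answer_question/{doc_id}",
    json={"question": "Is the article about climate policy?", "allow_bool": True},
).json()
print(answer)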
435
 
436
 
437
  @app.get("/benchmark")
438
+ async def benchmark(question: str, context: str, num_times: int, warmup_rounds: int):
439
+ """
440
+ Conducts benchmarking for the different pipeline components based on the specified parameters.
441
 
442
+ Parameters:
443
+ - question (str): The question to be used for benchmarking.
444
+ - context (str): The context for the question.
445
+ - num_times (int): The number of times the inference is run to calculate the average time.
446
+ - warmup_rounds (int): The number of initial rounds to be ignored for calculating the average time.
447
 
448
+ Returns:
449
+ - dict: A dictionary containing the benchmarking results for the question-answering and boolean pipelines.
450
+ """
451
+ qa = { get_qa_score(question, context, False, num_times, warmup_rounds), get_qa_score(question, context, True, num_times, warmup_rounds)}
452
+ bool_q = { get_bool_q_score(question, False, num_times, warmup_rounds), get_bool_q_score(question, True, num_times, warmup_rounds)}
453
 
454
+ answer = qa_pipeline(question=question, context=context)
455
+ bool_a = { get_bool_a_score(answer['answer'], question, False, num_times, warmup_rounds), get_bool_a_score(answer['answer'], question, True, num_times, warmup_rounds)}
 
456
 
457
+ return {'has_xpu': has_xpu, 'ipex_enabled': ipex_enabled,'qa': qa, 'bool_q': bool_q, 'bool_a': bool_a, 'answer': answer['answer']}
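A hedged example of exercising this endpoint from a client, assuming the same local host and port as above. Note that qa, bool_q, and bool_a are built as Python sets of two latencies (baseline and IPEX-optimized), so their order in the serialized response is not guaranteed:

import requests

resp = requests.get(
    "http://localhost:8000/benchmark",
    params={
        "question": "Who wrote the report?",
        "context": "The annual report was written by the finance team.",
        "num_times": 50,
        "warmup_rounds": 20,
    },
)
print(resp.json())  # {'has_xpu': ..., 'ipex_enabled': ..., 'qa': [...], 'bool_q': [...], 'bool_a': [...], 'answer': ...}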
458
 
 
459
 
460
 
461
  app.mount("/", StaticFiles(directory="static", html=True), name="static")
462
 
463
  @app.get("/")
464
  def index() -> FileResponse:
465
+ """
466
+ Returns the index.html file as the main landing page.
467
+
468
+ Returns:
469
+ - FileResponse: The index.html file as the main landing page.
470
+ """
471
  return FileResponse(path="/app/static/index.html", media_type="text/html")
472
+
static/index.html CHANGED
@@ -1,3 +1,21 @@
1
  <!doctype html>
2
  <html lang="en">
3
  <head>
 
1
+ <!--
2
+ HTML file for the main page of the ScanUDoc application.
3
+
4
+ Author: Rodolfo Torres
5
+ Email: rodolfo.torres@outlook.com
6
+ LinkedIn: https://www.linkedin.com/in/rodolfo-torres-p
7
+
8
+ This HTML file serves as the main interface for the ScanUDoc application, providing users with access to various features and functionalities.
9
+ It is an essential component that allows users to interact with the application's services and perform necessary tasks, such as uploading files,
10
+ processing text, and analyzing results.
11
+
12
+ The code is licensed under the GPL-3.0 license, which is a widely used open-source license, ensuring that any derivative work is also open source.
13
+ It grants users the freedom to use, modify, and distribute the software, as well as any modifications or extensions made to it.
14
+ However, any modified versions of the software must also be licensed under GPL-3.0.
15
+
16
+ For more details, please refer to the full text of the GPL-3.0 license at https://www.gnu.org/licenses/gpl-3.0.html.
17
+ -->
18
+
19
  <!doctype html>
20
  <html lang="en">
21
  <head>
static/js/app.js CHANGED
@@ -1,3 +1,17 @@
1
/*/ Do not change any of the variable values below;
2
use the "json/config.json" file to configure your settings. /*/
3
  let data_index = "";
@@ -18,7 +32,6 @@ let chat_maxlength = 0;
18
  let lang_index = 0;
19
  let scrollPosition = 0;
20
 
21
- let is_model_turbo = false;
22
  let use_text_stream = false;
23
  let display_microphone_in_chat = false;
24
  let display_avatar_in_chat = false;
@@ -56,6 +69,13 @@ if (window.location.protocol === 'file:') {
56
  //Loads the characters from the config.json file and appends them to the initial slider
57
  loadData("json/config.json", ["json/prompts-" + user_prompt_lang + ".json", "json/lang.json", "json/badwords.json"]);
58
 
59
  function loadData(url, urls) {
60
  // Fetch data from the given url and an array of urls using Promise.all and map functions
61
  return Promise.all([fetch(url).then(res => res.json()), ...urls.map(url => fetch(url).then(res => res.json()))])
@@ -135,6 +155,11 @@ function loadData(url, urls) {
135
  }).catch(err => { throw err })
136
  }
137
 
138
  function currentDate() {
139
  const timestamp = new Date();
140
  return timestamp.toLocaleString();
@@ -144,7 +169,9 @@ function currentDate() {
144
  // Define a placeholder for the image
145
  const placeholder = "img/placeholder.svg";
146
 
147
- // Check if the image is in the visible area
 
 
148
  $(window).on("scroll", function () {
149
  $("img[data-src]").each(function () {
150
  if (isElementInViewport($(this))) {
@@ -154,7 +181,12 @@ $(window).on("scroll", function () {
154
  });
155
  });
156
 
157
- // Helper function to check if the element is in the visible area
158
  function isElementInViewport(el) {
159
  const rect = el.get(0).getBoundingClientRect();
160
  return (
@@ -165,14 +197,19 @@ function isElementInViewport(el) {
165
  );
166
  }
167
 
168
- //Main function of GPT-3 chat API
 
169
  async function getResponse(prompt) {
170
 
171
  //Conversation history
172
  array_chat.push({ "name": "User", "message": prompt, "isImg": false, "date": currentDate() })
173
  array_messages = [];
174
 
175
- //Converting chat to turbo API model
176
  for (let i = 0; i < array_chat.length; i++) {
177
  let message = { "role": "", "content": "" };
178
 
@@ -194,11 +231,6 @@ async function getResponse(prompt) {
194
  var slice_messages = max_num_chats_api - 2;
195
  array_messages = array_messages.slice(0, 2).concat(array_messages.slice(-slice_messages));
196
  }
197
- /*
198
- const params = new URLSearchParams();
199
- params.append('array_chat', JSON.stringify(array_messages));
200
- params.append('prompts_name', prompts_name);
201
- */
202
 
203
  try {
204
  let question = array_messages[array_messages.length - 1].content;
@@ -209,13 +241,12 @@ async function getResponse(prompt) {
209
  allow_bool = true;
210
  }
211
 
212
- // Data to send to the server
213
  var questionData = {
214
  question: question,
215
  allow_bool: allow_bool,
216
  };
217
 
218
- //console.log(message);
219
const fullPrompt = "This is an example response made in English to test the capabilities of this chat";
220
  const randomID = generateUniqueID();
221
  $("#overflow-chat").append(`
@@ -236,20 +267,15 @@ async function getResponse(prompt) {
236
  </div>
237
  `);
238
 
239
- //$(`.${randomID}`).append(fullPrompt);
240
- //scrollChatBottom();
241
- //OK
242
-
243
- // Make a POST request to the /answer_question endpoint
244
  $.ajax({
245
  type: "POST",
246
  url: `/answer_question/${uuid}`,
247
  data: JSON.stringify(questionData),
248
  contentType: "application/json",
249
  success: function (data) {
250
- // The response is in data.response
251
  var response = data.answer;
252
- //console.log(data, response);
253
 
254
  $(".cursor").remove();
255
  str = $(`.${randomID}`).html();
@@ -279,11 +305,24 @@ async function getResponse(prompt) {
279
  }
280
  }
281
 
  function generateUniqueID(prefix = 'id_') {
283
  const timestamp = Date.now();
284
  return `${prefix}${timestamp}`;
285
  }
286
 
287
  function streamChat(source, randomID) {
288
  let fullPrompt = "";
289
  let partPrompt = "";
@@ -339,7 +378,7 @@ function streamChat(source, randomID) {
339
  return;
340
  }
341
 
342
- var choice = tokens.choices[0]; //is_model_turbo ? tokens.choices[0].delta : tokens.choices[0];
343
  partPrompt = "";
344
  if (choice.content || choice.text) {
345
  fullPrompt += choice.content || choice.text;
@@ -357,19 +396,26 @@ function streamChat(source, randomID) {
357
  }
358
 
359
 
 
 
 
360
  function saveChatHistory() {
361
  /*
362
  if (array_widgets[data_index]) {
363
  array_widgets[data_index].last_chat = array_chat;
364
  }
365
  if(chat_history){
366
- localStorage.setItem("oracle_chat_v1", JSON.stringify(array_widgets));
367
  }
368
  console.log("Saving...")
369
  */
370
  }
371
 
372
- //Function that appends the AI response in the chat in html
 
 
 
 
373
  function responseChat(response) {
374
 
375
  for (var i = 0; i < filterBotWords.length; i++) {
@@ -414,6 +460,11 @@ function responseChat(response) {
414
  checkClearChatDisplay();
415
  }
416
 
 
 
 
 
 
417
  function appendChatImg(chat) {
418
  const imageID = Date.now();
419
  IAimagePrompt = chat.replace("/img ", "");
@@ -448,7 +499,10 @@ function appendChatImg(chat) {
448
  $("#chat").val("");
449
  }
450
 
451
- //Function that sends the user's question to the chat in html and to the API
 
 
 
452
  function sendUserChat() {
453
  let chat = $("#chat").val();
454
 
@@ -504,7 +558,12 @@ function sendUserChat() {
504
  disableChat();
505
  }
506
 
507
- //Send message in chat by pressing enter
 
 
 
 
 
508
  $("#chat").keypress(function (e) {
509
  if (e.which === 13 && !e.shiftKey) {
510
  sendUserChat();
@@ -512,26 +571,27 @@ $("#chat").keypress(function (e) {
512
  }
513
  });
514
 
515
-
 
 
 
516
  $(".btn-send-chat").on("click", function () {
517
  sendUserChat();
518
  })
519
 
520
 
521
- // Function to shuffle the array
522
- function shuffleArray(array) {
523
- return array.sort(() => Math.random() - 0.5);
524
- }
525
-
526
  function translate() {
527
  translationObj = lang.translate[lang_index];
528
 
529
- // Loop through all the keys of the translationObj object
530
  for (let key in translationObj) {
531
- // Get the value of the current key
532
  let value = translationObj[key];
533
 
534
- // Find all HTML elements that contain the block between {{ and }}
535
  let elements = document.body.querySelectorAll('*:not(script):not(style)');
536
  elements.forEach(function (element) {
537
  for (let i = 0; i < element.childNodes.length; i++) {
@@ -540,11 +600,11 @@ function translate() {
540
  let text = node.nodeValue;
541
  let regex = new RegExp(`{{\\s*${key}\\s*}}`, 'g');
542
  if (regex.test(text)) {
543
- // Use the innerHTML property to interpret the HTML tags
544
  node.parentElement.innerHTML = text.replace(regex, value);
545
  }
546
  } else if (node.nodeType === Node.ELEMENT_NODE) {
547
- // For elements with HTML attributes, replace the key value in the attribute
548
  let attributes = node.attributes;
549
  for (let j = 0; j < attributes.length; j++) {
550
  let attribute = attributes[j];
@@ -559,6 +619,10 @@ function translate() {
559
  }
560
  }
561
 
 
 
 
 
562
  function closeChat() {
563
  hideChat();
564
  enableChat();
@@ -573,6 +637,10 @@ function closeChat() {
573
  return false;
574
  }
575
 
 
 
 
 
576
  function stopChat() {
577
  if (source) {
578
  enableChat();
@@ -581,16 +649,30 @@ function stopChat() {
581
  }
582
  }
583
 
 
 
 
 
584
  $(".btn-cancel-chat").on("click", function () {
585
  stopChat();
586
  })
587
 
 
 
 
 
588
  document.addEventListener("keydown", function (event) {
589
  if (event.key === "Escape") {
590
  closeChat();
591
  }
592
  });
593
 
 
 
 
 
 
 
594
  function hideChat() {
595
  hideFeedback();
596
  cancelSpeechSynthesis();
@@ -599,81 +681,90 @@ function hideChat() {
599
  if (/Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent)) {
600
  $("#overflow-chat").hide();
601
  }
602
-
603
  }
604
 
605
- // Add the event for the "Send" button
 
 
 
 
606
  $('#sendButton').click(function (evt) {
607
  evt.preventDefault();
608
 
609
  var textData = {
610
- text: $('#textArea').val(),
611
  };
612
 
613
- // Set the Toastr position to the top
614
  toastr.options.positionClass = 'toast-top-center';
615
 
616
- // Check whether the text variable is empty
617
  if (textData.text.trim() === '') {
618
  toastr.error("Error: Text cannot be empty.");
619
  return;
620
  }
621
 
622
- // Disable the button and add a spinner
623
  var sendButton = $('#sendButton');
624
  sendButton.prop('disabled', true);
625
  sendButton.html('<span class="spinner-border spinner-border-sm" role="status" aria-hidden="true"></span> Sending...');
626
 
627
- // Make a POST request to the /store_text endpoint
628
  $.ajax({
629
  type: "POST",
630
  url: `/store_text/${uuid}`,
631
  data: JSON.stringify(textData),
632
  contentType: "application/json",
633
  success: function (data) {
634
- // Enable the button again
635
  sendButton.prop('disabled', false);
636
  sendButton.html('Send');
637
 
638
  $('#textArea').val('');
639
- // Close the modal after sending the text
640
  textModal.hide();
641
  displayChat(chatId);
642
  },
643
  error: function (xhr, status, error) {
644
- // Check whether the backend returned an error code
645
  if (xhr.status === 400 || xhr.status === 500) {
646
  toastr.error(`Error: ${xhr.status} - ${error}`);
647
  } else {
648
  toastr.error("Error: Connection refused. Please try again later.");
649
  }
650
 
651
- // Enable the button again
652
  sendButton.prop('disabled', false);
653
  sendButton.html('Send');
654
  }
655
  });
656
  });
657
 
 
 
 
 
 
 
658
  $('#sendButton2').click(function (evt) {
659
  evt.preventDefault();
660
  var formData = new FormData($('#file-form')[0]);
661
  var sendButton = $('#sendButton2');
662
 
663
- // Set the Toastr position to the top
664
  toastr.options.positionClass = 'toast-top-center';
665
 
666
  var fileInput = $('#fileInput')[0];
667
- var fileSize = fileInput.files[0].size; // Size in bytes
668
- var maxSize = 1*1024*1024; // 1MB in bytes
669
 
670
- // Validate the file size
671
  if (fileSize > maxSize) {
672
  toastr.error('Error: File size exceeds 1MB limit.');
673
  return;
674
  }
675
 
676
- // Disable the button and add a spinner
677
  sendButton.prop('disabled', true);
678
  sendButton.html('<span class="spinner-border spinner-border-sm" role="status" aria-hidden="true"></span> Uploading...');
679
 
@@ -687,43 +778,47 @@ $('#sendButton2').click(function (evt) {
687
  processData: false,
688
  success: function (data) {
689
  $('#fileInput').val('');
690
- // Enable the button again
691
  sendButton.prop('disabled', false);
692
  sendButton.html('Send');
693
 
694
- // Close the modal after sending the text
695
  textModal.hide();
696
  displayChat(chatId);
697
  },
698
  error: function (xhr, status, error) {
699
- // Show an error message with Toastr
700
  toastr.error('Error uploading the file');
701
 
702
- // Enable the button again
703
  sendButton.prop('disabled', false);
704
  sendButton.html('Send');
705
  }
706
  });
707
  });
708
 
709
-
 
 
 
 
710
  $('#sendButton3').click(function () {
711
  var textData = {
712
  html_url: $('#url').val(),
713
  };
714
 
715
- // Set the Toastr position to the top
716
  toastr.options.positionClass = 'toast-top-center';
717
 
718
  var sendButton = $('#sendButton3');
719
 
720
- // Check whether the text variable is empty
721
  if (textData.html_url.trim() === '') {
722
  toastr.error("Error: URL cannot be empty.");
723
  return;
724
  }
725
 
726
- // Validate the URL
727
  var urlRegex = new RegExp('^(https?:\\/\\/)?'+
728
  '((([a-z\\d]([a-z\\d-]*[a-z\\d])*)\\.)+[a-z]{2,}|'+
729
  '((\\d{1,3}\\.){3}\\d{1,3}))'+
@@ -735,11 +830,11 @@ $('#sendButton3').click(function () {
735
  return;
736
  }
737
 
738
- // Disable the button and add a spinner
739
  sendButton.prop('disabled', true);
740
  sendButton.html('<span class="spinner-border spinner-border-sm" role="status" aria-hidden="true"></span> Sending...');
741
 
742
- // Make a POST request to the /store_text endpoint
743
  $.ajax({
744
  type: "POST",
745
  url: `/store_text/${uuid}`,
@@ -747,11 +842,11 @@ $('#sendButton3').click(function () {
747
  contentType: "application/json",
748
  success: function (data) {
749
  $('#url').val('');
750
- // Enable the button again
751
  sendButton.prop('disabled', false);
752
  sendButton.html('Send');
753
 
754
- // Close the modal after sending the text
755
  textModal.hide();
756
  displayChat(chatId);
757
  },
@@ -762,7 +857,7 @@ $('#sendButton3').click(function () {
762
  toastr.error(`Error: ${xhr.status} - ${error}`);
763
  }
764
 
765
- // Enable the button again
766
  sendButton.prop('disabled', false);
767
  sendButton.html('Send');
768
  }
@@ -771,7 +866,10 @@ $('#sendButton3').click(function () {
771
 
772
 
773
 
774
-
 
 
 
775
  $(document).delegate(".start-chat", "click", function () {
776
  chatId = $(this).attr("data-index");
777
  if (chatId == 0) {
@@ -790,10 +888,13 @@ $(document).delegate(".start-chat", "click", function () {
790
  });
791
  textModal.show();
792
  }
793
- //console.log(chatId);
794
- //displayChat($(this).attr("data-index"));
795
  })
796
 
 
 
 
 
 
797
  function displayChat(index) {
798
  data_index = index;
799
  cancelSpeechSynthesis();
@@ -844,7 +945,11 @@ function displayChat(index) {
844
  translate();
845
  }
846
 
847
-
 
 
 
 
848
  const escapeHtml = (str) => {
849
 
850
  // Check if the string contains <code> or <pre> tags
@@ -883,7 +988,10 @@ const escapeHtml = (str) => {
883
  return str;
884
  };
885
 
886
- // function to copy the text content
 
 
 
887
  function copyText(button) {
888
  const div = button.parentElement;
889
  const code = div.querySelector('.chat-response');
@@ -896,7 +1004,10 @@ function copyText(button) {
896
  button.innerHTML = lang["translate"][lang_index].copy_text2;
897
  }
898
 
899
- // Function to copy the content of the <pre> tag
 
 
 
900
  function copyCode(button) {
901
  const pre = button.parentElement;
902
  const code = pre.querySelector('code');
@@ -909,7 +1020,10 @@ function copyCode(button) {
909
  button.innerHTML = lang["translate"][lang_index].copy_code2;
910
  }
911
 
912
- // Clear Chat
 
 
 
913
  function clearChat(target) {
914
  // Display confirmation dialog using SweetAlert2 library
915
  Swal.fire({
@@ -958,7 +1072,7 @@ function clearChat(target) {
958
  "date": currentDate()
959
  })
960
  // Save updated character data to local storage
961
- localStorage.setItem("oracle_chat_v1", JSON.stringify(array_widgets));
962
 
963
  // If enabled, display welcome message for current character
964
  if (displayWelcomeMessage) {
@@ -968,6 +1082,9 @@ function clearChat(target) {
968
  })
969
  }
970
 
 
 
 
971
  function loadChat() {
972
  if (chat_history) {
973
  checkClearChatDisplay();
@@ -1063,7 +1180,9 @@ function loadChat() {
1063
  }
1064
 
1065
 
1066
- //Check Clear Chat display
 
 
1067
  function checkClearChatDisplay() {
1068
  if (array_widgets[data_index] && array_widgets[data_index].last_chat && array_widgets[data_index].last_chat.length > 1) {
1069
  if (chat_history) {
@@ -1073,10 +1192,12 @@ function checkClearChatDisplay() {
1073
  $("#clear-chat").hide();
1074
  }
1075
 
 
1076
  const hasLastChat = array_widgets.some((result) => {
1077
  return result.last_chat && result.last_chat.length > 2;
1078
  });
1079
 
 
1080
  if (hasLastChat) {
1081
  $("#clear-all-chats").show();
1082
  } else {
@@ -1084,12 +1205,16 @@ function checkClearChatDisplay() {
1084
  }
1085
  }
1086
 
1087
- //Error messages
 
 
1088
  function hideFeedback() {
1089
  toastr.remove()
1090
  }
1091
 
1092
- //Force chat to scroll down
 
 
1093
  function scrollChatBottom() {
1094
 
1095
  if (/Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent)) {
@@ -1110,7 +1235,9 @@ function scrollChatBottom() {
1110
 
1111
  }
1112
 
1113
- //Enable chat input
 
 
1114
  function enableChat() {
1115
  $(".character-typing").css('visibility', 'hidden')
1116
  $(".btn-send-chat,#chat").attr("disabled", false);
@@ -1122,10 +1249,11 @@ function enableChat() {
1122
  $('#chat').focus();
1123
  }, 500);
1124
  }
1125
-
1126
  }
1127
 
1128
- //Disable chat input
 
 
1129
  function disableChat() {
1130
  $(".character-typing").css('visibility', 'visible')
1131
  $(".character-typing").css('display', 'flex');
@@ -1135,6 +1263,11 @@ function disableChat() {
1135
  $(".btn-cancel-chat").show();
1136
  }
1137
 
 
 
 
 
 
1138
  function createTextFile(data) {
1139
  let text = "";
1140
 
@@ -1153,6 +1286,9 @@ function createTextFile(data) {
1153
  return blob;
1154
  }
1155
 
 
 
 
1156
  function downloadPdf() {
1157
 
1158
  var docDefinition = {
@@ -1210,7 +1346,11 @@ function downloadPdf() {
1210
  pdfMakeInstance.download('chat.pdf');
1211
  }
1212
 
1213
- // Function to download the file
 
 
 
 
1214
  function downloadFile(blob, fileName) {
1215
  // Create a URL object with the Blob
1216
  const url = URL.createObjectURL(blob);
@@ -1228,13 +1368,17 @@ function downloadFile(blob, fileName) {
1228
  document.body.removeChild(link);
1229
  }
1230
 
1231
- // Function to handle the download button click event
 
 
1232
  function handleDownload() {
1233
  const blob = createTextFile(array_chat);
1234
  downloadFile(blob, "chat.txt");
1235
  }
1236
 
1237
- //Chat audio
 
 
1238
  $(document).on("click", ".chat-audio", function () {
1239
  var $this = $(this);
1240
  var $img = $this.find("img");
@@ -1253,16 +1397,21 @@ $(document).on("click", ".chat-audio", function () {
1253
  if (!play) {
1254
  cancelSpeechSynthesis();
1255
 
1256
- // Remove the copy-text button before synthesizing speech
1257
  var chatResponseText = $chatResponse.html().replace(/<button\b[^>]*\bclass="[^"]*\bcopy-code\b[^"]*"[^>]*>.*?<\/button>/ig, "");
1258
 
1259
- // Check whether the feature is supported before calling the function
1260
  if ('speechSynthesis' in window) {
1261
  doSpeechSynthesis(chatResponseText, $chatResponse);
1262
  }
1263
  }
1264
  });
1265
 
 
 
 
 
 
1266
  function cleanStringToSynthesis(str) {
1267
  str = str.trim()
1268
  .replace(/<[^>]*>/g, "")
@@ -1272,13 +1421,20 @@ function cleanStringToSynthesis(str) {
1272
  return str;
1273
  }
1274
 
 
 
 
1275
  function cancelSpeechSynthesis() {
1276
  if (window.speechSynthesis) {
1277
  window.speechSynthesis.cancel();
1278
  }
1279
  }
1280
 
1281
-
 
 
 
 
1282
  function doSpeechSynthesis(longText, chatResponse) {
1283
 
1284
  $("span.chat-response-highlight").each(function () {
@@ -1362,14 +1518,24 @@ function doSpeechSynthesis(longText, chatResponse) {
1362
  speakTextParts();
1363
  }
1364
 
 
 
 
 
1365
  window.speechSynthesis.onvoiceschanged = function () {
1366
  getTextToSpeechVoices();
1367
  };
1368
 
 
 
 
1369
  function displayVoices() {
1370
  console.table(array_voices)
1371
  }
1372
 
 
 
 
1373
  function getTextToSpeechVoices() {
1374
  window.speechSynthesis.getVoices().forEach(function (voice) {
1375
  const voiceObj = {
@@ -1380,21 +1546,27 @@ function getTextToSpeechVoices() {
1380
  });
1381
  }
1382
 
1383
- //Display employees description
 
 
 
1384
  const myModalEl = document.getElementById('modalDefault')
1385
  myModalEl.addEventListener('show.bs.modal', event => {
1386
  $("#modalDefault .modal-body").html(array_widgets[data_index].description);
1387
  })
1388
 
 
 
 
 
1389
  const myModalConfig = document.getElementById('modalConfig')
1390
  myModalConfig.addEventListener('show.bs.modal', event => {
1391
loadSettings(); // Load the settings when the page loads
1392
- //console.log('Load settings');
1393
- //$("#modalConfig .modal-title").html(array_widgets[data_index].name);
1394
- //$("#modalConfig .modal-body").html(array_widgets[data_index].description);
1395
  })
1396
 
1397
- // Define the key for the localStorage storage item
 
 
1398
  const localStorageKey = "col-contacts-border-display";
1399
 
1400
  // Get the current display state of the div from localStorage, if it exists
@@ -1406,7 +1578,9 @@ if (displayState) {
1406
  $(".col-contacts-border").css("display", "none");
1407
  }
1408
 
1409
- // Add the click event to toggle the display state of the div
 
 
1410
  $(".toggle_employees_list").on("click", function () {
1411
  $(".col-contacts-border").toggle();
1412
 
@@ -1417,7 +1591,9 @@ $(".toggle_employees_list").on("click", function () {
1417
  localStorage.setItem(localStorageKey, displayState);
1418
  });
1419
 
1420
-
 
 
1421
  toastr.options = {
1422
  "closeButton": true,
1423
  "debug": false,
@@ -1436,12 +1612,19 @@ toastr.options = {
1436
  "hideMethod": "fadeOut"
1437
  }
1438
 
1439
-
1440
  const textarea = document.querySelector('#chat');
 
 
1441
  const microphoneButton = document.querySelector('#microphone-button');
1442
 
 
1443
  let isTranscribing = false; // Initially not transcribing
1444
 
 
 
 
 
1445
  function loadSpeechRecognition() {
1446
  if ('SpeechRecognition' in window || 'webkitSpeechRecognition' in window) {
1447
  recognition = new (window.SpeechRecognition || window.webkitSpeechRecognition)();
@@ -1467,16 +1650,16 @@ function loadSpeechRecognition() {
1467
  console.log('microphone off');
1468
  $(".btn-send-chat").attr("disabled", false);
1469
  $("#microphone-button").attr("src", "img/mic-start.svg")
1470
- isTranscribing = false; // Mark the transcription as finished
1471
  });
1472
 
1473
  microphoneButton.addEventListener('click', () => {
1474
  if (!isTranscribing) {
1475
- // Start transcription if not already transcribing
1476
  recognition.start();
1477
  isTranscribing = true;
1478
  } else {
1479
- // Stop transcription if already transcribing
1480
  recognition.stop();
1481
  isTranscribing = false;
1482
  }
@@ -1487,6 +1670,10 @@ function loadSpeechRecognition() {
1487
  }
1488
  }
1489
 
 
 
 
 
1490
  function generateUUID() {
1491
  let d = new Date().getTime();
1492
  if (typeof performance !== 'undefined' && typeof performance.now === 'function') {
@@ -1499,23 +1686,29 @@ function generateUUID() {
1499
  });
1500
  }
1501
 
1502
- // Function to load data from localStorage into the form if available
 
 
1503
  function loadSettings() {
1504
  const settings = getSettings();
1505
 
1506
- // Loading default values
1507
  $('#voiceOfPlayback').val(settings.voiceOfPlayback);
1508
  $('#microphoneLanguage').val(settings.microphoneLanguage);
1509
  $('#answersToggle').prop('checked', settings.answersToggle);
1510
  }
1511
 
 
 
 
 
1512
  function getSettings() {
1513
  let settings = '';
1514
  const textTalkSettings = localStorage.getItem('text-talk-settings');
1515
  if (textTalkSettings) {
1516
  settings = JSON.parse(textTalkSettings);
1517
  } else {
1518
- settings = createAndSaveSettings(); // Call the function to create and save the settings if they are not found in localStorage
1519
  }
1520
  if(uuid == ''){
1521
  uuid = settings.id;
@@ -1523,7 +1716,10 @@ function getSettings() {
1523
  return settings;
1524
  }
1525
 
1526
- // Function to create and save the settings in localStorage
 
 
 
1527
  function createAndSaveSettings() {
1528
  const settings = {
1529
  id: generateUUID(),
@@ -1535,23 +1731,23 @@ function createAndSaveSettings() {
1535
  return settings;
1536
  }
1537
 
1538
- // Check whether speech synthesis is supported by the browser
1539
  if ('speechSynthesis' in window) {
1540
- // Wait for the voices to be loaded before listing them
1541
  window.speechSynthesis.onvoiceschanged = function () {
1542
- // Get all available voices
1543
  const voices = speechSynthesis.getVoices();
1544
 
1545
- // Filter the voices whose language starts with 'en' to identify the English voices
1546
  const englishVoices = voices.filter(voice => voice.lang.startsWith('en'));
1547
 
1548
- // Get the select element by its id
1549
  const dropdown = document.getElementById('voiceOfPlayback');
1550
 
1551
- // Remove the previous options from the dropdown
1552
  dropdown.innerHTML = '';
1553
 
1554
- // Populate the dropdown with the available English voices
1555
  englishVoices.forEach(function (voice) {
1556
  const option = document.createElement('option');
1557
  option.value = `${voice.lang}***${voice.name}`;
@@ -1560,23 +1756,23 @@ if ('speechSynthesis' in window) {
1560
  });
1561
  };
1562
  } else {
1563
- console.error('Speech synthesis is not supported in this browser.');
1564
  }
1565
 
1566
- // Load the microphone recognition languages
1567
  if ('SpeechRecognition' in window || 'webkitSpeechRecognition' in window) {
1568
  const recognition = new (window.SpeechRecognition || window.webkitSpeechRecognition)();
1569
 
1570
- // Get the supported languages for speech recognition
1571
  const supportedLanguages = { 'en-US': 'Google US English', 'en-GB': 'Google UK English' };
1572
 
1573
- // Get the select element by its id
1574
  const dropdown = document.getElementById('microphoneLanguage');
1575
 
1576
- // Remove the previous options from the dropdown
1577
  dropdown.innerHTML = '';
1578
 
1579
- // Populate the dropdown with the languages available for speech recognition
1580
  for (const langCode in supportedLanguages) {
1581
  if (Object.hasOwnProperty.call(supportedLanguages, langCode)) {
1582
  const langName = supportedLanguages[langCode];
@@ -1587,15 +1783,13 @@ if ('SpeechRecognition' in window || 'webkitSpeechRecognition' in window) {
1587
  }
1588
  }
1589
  } else {
1590
- console.error('Speech recognition is not supported in this browser.');
1591
  }
1592
 
1593
-
1594
-
1595
  $(document).ready(function () {
1596
- // Event handler to save the settings when the form is submitted
1597
  $('#modal-settings-submit').click(function (event) {
1598
- event.preventDefault(); // Prevent the form from being submitted
1599
  let settings = getSettings();
1600
  settings = {
1601
  id: settings.id,
@@ -1608,7 +1802,7 @@ $(document).ready(function () {
1608
  $('#modalConfig').modal('hide');
1609
  });
1610
 
1611
- // Handle the character count
1612
  $('#textArea').on('input', function () {
1613
  var maxLength = 4000;
1614
  var currentLength = $(this).val().length;
 
1
+ /*
2
+ Author: Rodolfo Torres
3
+ Email: rodolfo.torres@outlook.com
4
+ LinkedIn: https://www.linkedin.com/in/rodolfo-torres-p
5
+ License: This code is licensed under GPL-3.0
6
+
7
+ The code is licensed under the GPL-3.0 license, which is a widely used open-source license, ensuring that any derivative work is also open source.
8
+ It grants users the freedom to use, modify, and distribute the software, as well as any modifications or extensions made to it.
9
+ However, any modified versions of the software must also be licensed under GPL-3.0.
10
+
11
+ For more details, please refer to the full text of the GPL-3.0 license at https://www.gnu.org/licenses/gpl-3.0.html.
12
+ */
13
+
14
+
15
/*/ Do not change any of the variable values below;
16
use the "json/config.json" file to configure your settings. /*/
17
  let data_index = "";
 
32
  let lang_index = 0;
33
  let scrollPosition = 0;
34
 
 
35
  let use_text_stream = false;
36
  let display_microphone_in_chat = false;
37
  let display_avatar_in_chat = false;
 
69
  //Loads the characters from the config.json file and appends them to the initial slider
70
  loadData("json/config.json", ["json/prompts-" + user_prompt_lang + ".json", "json/lang.json", "json/badwords.json"]);
71
 
72
+ /**
73
+ * Function to load data from the given URL and an array of URLs using Promise.all and map functions.
74
+ *
75
+ * @param {string} url - The URL to fetch the data from.
76
+ * @param {Array} urls - An array of URLs to fetch additional data from.
77
+ * @returns {Promise} - A Promise that resolves with the fetched data and updates the necessary variables.
78
+ */
79
  function loadData(url, urls) {
80
  // Fetch data from the given url and an array of urls using Promise.all and map functions
81
  return Promise.all([fetch(url).then(res => res.json()), ...urls.map(url => fetch(url).then(res => res.json()))])
 
155
  }).catch(err => { throw err })
156
  }
157
 
158
+ /**
159
+ * Function to retrieve the current date and time.
160
+ *
161
+ * @returns {string} - A string representing the current date and time in a localized format.
162
+ */
163
  function currentDate() {
164
  const timestamp = new Date();
165
  return timestamp.toLocaleString();
 
169
  // Define a placeholder for the image
170
  const placeholder = "img/placeholder.svg";
171
 
172
+ /**
173
+ * Event listener for the scroll event that checks if the image is in the visible area.
174
+ */
175
  $(window).on("scroll", function () {
176
  $("img[data-src]").each(function () {
177
  if (isElementInViewport($(this))) {
 
181
  });
182
  });
183
 
184
+ /**
185
+ * Helper function to check if the element is in the visible area.
186
+ *
187
+ * @param {Object} el - The element to be checked.
188
+ * @returns {boolean} - A boolean indicating whether the element is in the visible area.
189
+ */
190
  function isElementInViewport(el) {
191
  const rect = el.get(0).getBoundingClientRect();
192
  return (
 
197
  );
198
  }
199
 
200
+ /**
201
+ * Main function of the chat API responsible for getting a response based on the provided prompt.
202
+ *
203
+ * @param {string} prompt - The prompt or message from the user.
204
+ * @returns {Promise<void>} - A Promise that resolves when the response is obtained and displayed in the chat.
205
+ */
206
  async function getResponse(prompt) {
207
 
208
  //Conversation history
209
  array_chat.push({ "name": "User", "message": prompt, "isImg": false, "date": currentDate() })
210
  array_messages = [];
211
 
212
+ //Converting chat to API model
213
  for (let i = 0; i < array_chat.length; i++) {
214
  let message = { "role": "", "content": "" };
215
 
 
231
  var slice_messages = max_num_chats_api - 2;
232
  array_messages = array_messages.slice(0, 2).concat(array_messages.slice(-slice_messages));
233
  }
 
 
 
 
 
234
 
235
  try {
236
  let question = array_messages[array_messages.length - 1].content;
 
241
  allow_bool = true;
242
  }
243
 
244
+ // Data to send to the server
245
  var questionData = {
246
  question: question,
247
  allow_bool: allow_bool,
248
  };
249
 
 
250
const fullPrompt = "This is an example response made in English to test the capabilities of this chat";
251
  const randomID = generateUniqueID();
252
  $("#overflow-chat").append(`
 
267
  </div>
268
  `);
269
 
270
+ // Make a POST request to the /answer_question endpoint
 
 
 
 
271
  $.ajax({
272
  type: "POST",
273
  url: `/answer_question/${uuid}`,
274
  data: JSON.stringify(questionData),
275
  contentType: "application/json",
276
  success: function (data) {
277
+ // The response is in data.answer
278
  var response = data.answer;
 
279
 
280
  $(".cursor").remove();
281
  str = $(`.${randomID}`).html();
 
305
  }
306
  }
307
 
308
+ /**
309
+ * Function to generate a unique ID with an optional prefix.
310
+ *
311
+ * @param {string} prefix - The optional prefix for the generated ID. Default is 'id_'.
312
+ * @returns {string} - A string representing the unique ID with the specified prefix and timestamp.
313
+ */
314
  function generateUniqueID(prefix = 'id_') {
315
  const timestamp = Date.now();
316
  return `${prefix}${timestamp}`;
317
  }
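+ // e.g. generateUniqueID('chat_') would return something like "chat_1700000000000"
+ // (prefix + Date.now() timestamp), used above to tag each response bubble in the chat.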
318
 
319
+ /**
320
+ * Function to stream the chat content based on the received source and randomID.
321
+ *
322
+ * @param {EventSource} source - The source of the event stream.
323
+ * @param {string} randomID - A string representing the unique ID for the chat.
324
+ * @returns {boolean} - A boolean indicating whether the streaming is successful or not.
325
+ */
326
  function streamChat(source, randomID) {
327
  let fullPrompt = "";
328
  let partPrompt = "";
 
378
  return;
379
  }
380
 
381
+ var choice = tokens.choices[0];
382
  partPrompt = "";
383
  if (choice.content || choice.text) {
384
  fullPrompt += choice.content || choice.text;
 
396
  }
397
 
398
 
399
+ /**
400
+ * Function to save the chat history into the local storage.
401
+ */
402
  function saveChatHistory() {
403
  /*
404
  if (array_widgets[data_index]) {
405
  array_widgets[data_index].last_chat = array_chat;
406
  }
407
  if(chat_history){
408
+ localStorage.setItem("text_talk_v1", JSON.stringify(array_widgets));
409
  }
410
  console.log("Saving...")
411
  */
412
  }
413
 
414
+ /**
415
+ * Function that appends the AI response in the chat in HTML.
416
+ *
417
+ * @param {string} response - The response message from the AI.
418
+ */
419
  function responseChat(response) {
420
 
421
  for (var i = 0; i < filterBotWords.length; i++) {
 
460
  checkClearChatDisplay();
461
  }
462
 
463
+ /**
464
+ * Function to append an image to the chat.
465
+ *
466
+ * @param {string} chat - The chat message.
467
+ */
468
  function appendChatImg(chat) {
469
  const imageID = Date.now();
470
  IAimagePrompt = chat.replace("/img ", "");
 
499
  $("#chat").val("");
500
  }
501
 
502
+ /**
503
+ * Function that sends the user's chat message to the chat in HTML and to the API.
504
+ *
505
+ */
506
  function sendUserChat() {
507
  let chat = $("#chat").val();
508
 
 
558
  disableChat();
559
  }
560
 
561
+ /**
562
+ * Send a message in the chat by pressing the Enter key.
563
+ *
564
+ * @param {object} e - The event object.
565
+ * @returns {boolean} - Returns false to prevent the default behavior of the Enter key.
566
+ */
567
  $("#chat").keypress(function (e) {
568
  if (e.which === 13 && !e.shiftKey) {
569
  sendUserChat();
 
571
  }
572
  });
573
 
574
+ /**
575
+ * Event listener for the click event on the chat send button.
576
+ * Calls the 'sendUserChat' function when the button is clicked.
577
+ */
578
  $(".btn-send-chat").on("click", function () {
579
  sendUserChat();
580
  })
581
 
582
 
583
+ /**
584
+ * Translates text elements in the HTML using the translation object.
585
+ */
 
 
586
  function translate() {
587
  translationObj = lang.translate[lang_index];
588
 
589
+ // Loop through all the keys in the translationObj object
590
  for (let key in translationObj) {
591
+ // Get the value of the current key
592
  let value = translationObj[key];
593
 
594
+ // Walk all elements in the HTML (except script and style) looking for {{ key }} placeholders
595
  let elements = document.body.querySelectorAll('*:not(script):not(style)');
596
  elements.forEach(function (element) {
597
  for (let i = 0; i < element.childNodes.length; i++) {
 
600
  let text = node.nodeValue;
601
  let regex = new RegExp(`{{\\s*${key}\\s*}}`, 'g');
602
  if (regex.test(text)) {
603
+ // Use the innerHTML property to interpret HTML tags
604
  node.parentElement.innerHTML = text.replace(regex, value);
605
  }
606
  } else if (node.nodeType === Node.ELEMENT_NODE) {
607
+ // For elements with HTML attributes, replace the key's value in the attribute
608
  let attributes = node.attributes;
609
  for (let j = 0; j < attributes.length; j++) {
610
  let attribute = attributes[j];
 
619
  }
620
  }
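+ // For example, assuming lang.json defined a (hypothetical) key "send_button" with the
+ // value "Send", any "{{ send_button }}" placeholder in the page would be replaced by "Send".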
621
 
622
+ /**
623
+ * Closes the chat interface and shows the chat options.
624
+ * Restores the previous scroll position and adjusts the UI accordingly.
625
+ */
626
  function closeChat() {
627
  hideChat();
628
  enableChat();
 
637
  return false;
638
  }
639
 
640
+ /**
641
+ * Stops the ongoing chat conversation.
642
+ * Closes the chat source and enables the chat.
643
+ */
644
  function stopChat() {
645
  if (source) {
646
  enableChat();
 
649
  }
650
  }
651
 
652
+ /**
653
+ * Attaches an event listener to the cancel chat button.
654
+ * Calls the stopChat function on click event.
655
+ */
656
  $(".btn-cancel-chat").on("click", function () {
657
  stopChat();
658
  })
659
 
660
+ /**
661
+ * Listens for the Escape key event.
662
+ * Calls the closeChat function when the Escape key is pressed.
663
+ */
664
  document.addEventListener("keydown", function (event) {
665
  if (event.key === "Escape") {
666
  closeChat();
667
  }
668
  });
669
 
670
+ /**
671
+ * Hides the chat element.
672
+ * Calls the hideFeedback and cancelSpeechSynthesis functions.
673
+ * Shows the hide-section and hides the chat-background.
674
+ * Hides the overflow-chat if the user agent matches the specified mobile devices.
675
+ */
676
  function hideChat() {
677
  hideFeedback();
678
  cancelSpeechSynthesis();
 
681
  if (/Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent)) {
682
  $("#overflow-chat").hide();
683
  }
 
684
  }
685
 
686
+ /**
687
+ * Adds an event to the send button to submit the provided text.
688
+ * Makes a POST call to the /store_text endpoint to store the text.
689
+ * Handles errors and displays Toastr messages as necessary.
690
+ */
691
  $('#sendButton').click(function (evt) {
692
  evt.preventDefault();
693
 
694
  var textData = {
695
+ text: $('#textArea').val(), // The text to be sent
696
  };
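+ // A minimal sketch of the assumed payload accepted by /store_text (see TextInfo in main.py);
+ // only the field being submitted needs to be present:
+ //   { "text": "Pasted document text..." }        (this handler)
+ //   { "html_url": "https://example.com/page" }   (the URL handler below)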
697
 
698
+ // Set Toastr position to top
699
  toastr.options.positionClass = 'toast-top-center';
700
 
701
+ // Check if the text variable is empty
702
  if (textData.text.trim() === '') {
703
  toastr.error("Error: Text cannot be empty.");
704
  return;
705
  }
706
 
707
+ // Disable the button and add a spinner
708
  var sendButton = $('#sendButton');
709
  sendButton.prop('disabled', true);
710
  sendButton.html('<span class="spinner-border spinner-border-sm" role="status" aria-hidden="true"></span> Sending...');
711
 
712
+ // Make a POST call to the /store_text endpoint
713
  $.ajax({
714
  type: "POST",
715
  url: `/store_text/${uuid}`,
716
  data: JSON.stringify(textData),
717
  contentType: "application/json",
718
  success: function (data) {
719
+ // Enable the button again
720
  sendButton.prop('disabled', false);
721
  sendButton.html('Send');
722
 
723
  $('#textArea').val('');
724
+ // Close the modal after sending the text
725
  textModal.hide();
726
  displayChat(chatId);
727
  },
728
  error: function (xhr, status, error) {
729
+ // Check if there is a backend error code
730
  if (xhr.status === 400 || xhr.status === 500) {
731
  toastr.error(`Error: ${xhr.status} - ${error}`);
732
  } else {
733
  toastr.error("Error: Connection refused. Please try again later.");
734
  }
735
 
736
+ // Enable the button again
737
  sendButton.prop('disabled', false);
738
  sendButton.html('Send');
739
  }
740
  });
741
  });
742
 
743
+
744
+ /**
745
+ * Adds an event to the send button to upload the file.
746
+ * Makes a POST call to the /upload_file endpoint to upload the file.
747
+ * Handles errors and displays Toastr messages as necessary.
748
+ */
749
  $('#sendButton2').click(function (evt) {
750
  evt.preventDefault();
751
  var formData = new FormData($('#file-form')[0]);
752
  var sendButton = $('#sendButton2');
753
 
754
+ // Set Toastr position to top
755
  toastr.options.positionClass = 'toast-top-center';
756
 
757
  var fileInput = $('#fileInput')[0];
758
+ if (!fileInput.files.length) { toastr.error('Error: No file selected.'); return; }
+ var fileSize = fileInput.files[0].size; // Size in bytes
759
+ var maxSize = 1*1024*1024; // 1MB in bytes
760
 
761
+ // Validate the file size
762
  if (fileSize > maxSize) {
763
  toastr.error('Error: File size exceeds 1MB limit.');
764
  return;
765
  }
766
 
767
+ // Disable the button and add a spinner
768
  sendButton.prop('disabled', true);
769
  sendButton.html('<span class="spinner-border spinner-border-sm" role="status" aria-hidden="true"></span> Uploading...');
770
 
 
778
  processData: false,
779
  success: function (data) {
780
  $('#fileInput').val('');
781
+ // Enable the button again
782
  sendButton.prop('disabled', false);
783
  sendButton.html('Send');
784
 
785
+ // Close the modal after uploading the file
786
  textModal.hide();
787
  displayChat(chatId);
788
  },
789
  error: function (xhr, status, error) {
790
+ // Show error message with Toastr
791
  toastr.error('Error uploading the file');
792
 
793
+ // Enable the button again
794
  sendButton.prop('disabled', false);
795
  sendButton.html('Send');
796
  }
797
  });
798
  });
799
 
800
+ /**
801
+ * Adds an event to the send button to send the URL.
802
+ * Makes a POST call to the /store_text endpoint to send the URL.
803
+ * Handles errors and displays Toastr messages as necessary.
804
+ */
805
  $('#sendButton3').click(function () {
806
  var textData = {
807
  html_url: $('#url').val(),
808
  };
809
 
810
+ // Set Toastr position to top
811
  toastr.options.positionClass = 'toast-top-center';
812
 
813
  var sendButton = $('#sendButton3');
814
 
815
+ // Check if the URL field is empty
816
  if (textData.html_url.trim() === '') {
817
  toastr.error("Error: URL cannot be empty.");
818
  return;
819
  }
820
 
821
+ // Validate the URL
822
  var urlRegex = new RegExp('^(https?:\\/\\/)?'+
823
  '((([a-z\\d]([a-z\\d-]*[a-z\\d])*)\\.)+[a-z]{2,}|'+
824
  '((\\d{1,3}\\.){3}\\d{1,3}))'+
 
830
  return;
831
  }
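+ // e.g. "https://example.com" or "example.com" would be expected to pass the pattern
+ // above, while a plain string such as "not a url" would trigger the error toast.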
832
 
833
+ // Disable the button and add a spinner
834
  sendButton.prop('disabled', true);
835
  sendButton.html('<span class="spinner-border spinner-border-sm" role="status" aria-hidden="true"></span> Sending...');
836
 
837
+ // Make a POST call to the /store_text endpoint
838
  $.ajax({
839
  type: "POST",
840
  url: `/store_text/${uuid}`,
 
842
  contentType: "application/json",
843
  success: function (data) {
844
  $('#url').val('');
845
+ // Enable the button again
846
  sendButton.prop('disabled', false);
847
  sendButton.html('Send');
848
 
849
+ // Close the modal after sending the URL
850
  textModal.hide();
851
  displayChat(chatId);
852
  },
 
857
  toastr.error(`Error: ${xhr.status} - ${error}`);
858
  }
859
 
860
+ // Enable the button again
861
  sendButton.prop('disabled', false);
862
  sendButton.html('Send');
863
  }
 
866
 
867
 
868
 
869
+ /**
870
+ * Attaches a click event to the elements with the "start-chat" class.
871
+ * Displays different modals based on the data-index attribute of the clicked element.
872
+ */
873
  $(document).delegate(".start-chat", "click", function () {
874
  chatId = $(this).attr("data-index");
875
  if (chatId == 0) {
 
888
  });
889
  textModal.show();
890
  }
 
 
891
  })
892
 
893
+ /**
894
+ * Displays the chat based on the provided index.
895
+ * Sets up the necessary variables and elements for the chat display.
896
+ * @param {number} index - The index of the chat to be displayed.
897
+ */
898
  function displayChat(index) {
899
  data_index = index;
900
  cancelSpeechSynthesis();
 
945
  translate();
946
  }
947
 
948
+ /**
949
+ * Escapes special characters in a string with their corresponding HTML codes.
950
+ * @param {string} str - The input string to be escaped.
951
+ * @returns {string} - The string with escaped characters.
952
+ */
953
  const escapeHtml = (str) => {
954
 
955
  // Check if the string contains <code> or <pre> tags
 
988
  return str;
989
  };
990
 
991
+ /**
992
+ * Copies the text content to the clipboard.
993
+ * @param {HTMLElement} button - The button element that triggers the copy action.
994
+ */
995
  function copyText(button) {
996
  const div = button.parentElement;
997
  const code = div.querySelector('.chat-response');
 
1004
  button.innerHTML = lang["translate"][lang_index].copy_text2;
1005
  }
1006
 
1007
+ /**
1008
+ * Copies the content of the <pre> tag to the clipboard.
1009
+ * @param {HTMLElement} button - The button element that triggers the copy action.
1010
+ */
1011
  function copyCode(button) {
1012
  const pre = button.parentElement;
1013
  const code = pre.querySelector('code');
 
1020
  button.innerHTML = lang["translate"][lang_index].copy_code2;
1021
  }
1022
 
1023
+ /**
1024
+ * Clears the chat history for the specified target. Displays a confirmation dialog before clearing.
1025
+ * @param {string} target - The target for clearing the chat history. Can be "all" to clear all characters' chat history or "current" to clear the current character's chat history.
1026
+ */
1027
  function clearChat(target) {
1028
  // Display confirmation dialog using SweetAlert2 library
1029
  Swal.fire({
 
1072
  "date": currentDate()
1073
  })
1074
  // Save updated character data to local storage
1075
+ localStorage.setItem("text_talk_v1", JSON.stringify(array_widgets));
1076
 
1077
  // If enabled, display welcome message for current character
1078
  if (displayWelcomeMessage) {
 
1082
  })
1083
  }
1084
 
1085
+ /**
1086
+ * Loads the chat history for the current character from the local storage.
1087
+ */
1088
  function loadChat() {
1089
  if (chat_history) {
1090
  checkClearChatDisplay();
 
1180
  }
1181
 
1182
 
1183
+ /**
1184
+ * Checks the display for the "Clear Chat" option based on the chat history for the current character.
1185
+ */
1186
  function checkClearChatDisplay() {
1187
  if (array_widgets[data_index] && array_widgets[data_index].last_chat && array_widgets[data_index].last_chat.length > 1) {
1188
  if (chat_history) {
 
1192
  $("#clear-chat").hide();
1193
  }
1194
 
1195
+ // Check if there is chat history for any character
1196
  const hasLastChat = array_widgets.some((result) => {
1197
  return result.last_chat && result.last_chat.length > 2;
1198
  });
1199
 
1200
+ // Display or hide the "Clear All Chats" option based on the presence of chat history
1201
  if (hasLastChat) {
1202
  $("#clear-all-chats").show();
1203
  } else {
 
1205
  }
1206
  }
1207
 
1208
+ /**
1209
+ * Hides the error messages shown on the screen.
1210
+ */
1211
  function hideFeedback() {
1212
  toastr.remove()
1213
  }
1214
 
1215
+ /**
1216
+ * Forces the chat to scroll to the bottom of the conversation.
1217
+ */
1218
  function scrollChatBottom() {
1219
 
1220
  if (/Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent)) {
 
1235
 
1236
  }
1237
 
1238
+ /**
1239
+ * Enables the chat input by setting the appropriate attributes and focusing on the chat input box.
1240
+ */
1241
  function enableChat() {
1242
  $(".character-typing").css('visibility', 'hidden')
1243
  $(".btn-send-chat,#chat").attr("disabled", false);
 
1249
  $('#chat').focus();
1250
  }, 500);
1251
  }
 
1252
  }
1253
 
1254
+ /**
1255
+ * Disables the chat input by setting the appropriate attributes and adjusting the visibility of certain elements.
1256
+ */
1257
  function disableChat() {
1258
  $(".character-typing").css('visibility', 'visible')
1259
  $(".character-typing").css('display', 'flex');
 
1263
  $(".btn-cancel-chat").show();
1264
  }
1265
 
1266
+ /**
1267
+ * Creates a text file based on the data provided.
1268
+ * @param {Array} data - An array containing chat data.
1269
+ * @returns {Blob} A Blob object representing the text file.
1270
+ */
1271
  function createTextFile(data) {
1272
  let text = "";
1273
 
 
1286
  return blob;
1287
  }
1288
 
1289
+ /**
1290
+ * Generates and downloads a PDF document based on the chat messages.
1291
+ */
1292
  function downloadPdf() {
1293
 
1294
  var docDefinition = {
 
1346
  pdfMakeInstance.download('chat.pdf');
1347
  }
1348
 
1349
+ /**
1350
+ * Downloads a file with the provided Blob and filename.
1351
+ * @param {Blob} blob - The Blob object to be downloaded.
1352
+ * @param {string} fileName - The name of the file to be downloaded.
1353
+ */
1354
  function downloadFile(blob, fileName) {
1355
  // Create a URL object with the Blob
1356
  const url = URL.createObjectURL(blob);
 
1368
  document.body.removeChild(link);
1369
  }
1370
 
1371
+ /**
1372
+ * Handles the download button click event.
1373
+ */
1374
  function handleDownload() {
1375
  const blob = createTextFile(array_chat);
1376
  downloadFile(blob, "chat.txt");
1377
  }
1378
 
1379
+ /**
1380
+ * Handles the chat audio functionality.
1381
+ */
1382
  $(document).on("click", ".chat-audio", function () {
1383
  var $this = $(this);
1384
  var $img = $this.find("img");
 
1397
  if (!play) {
1398
  cancelSpeechSynthesis();
1399
 
1400
+ // Remove the text copy button before synthesizing speech
1401
  var chatResponseText = $chatResponse.html().replace(/<button\b[^>]*\bclass="[^"]*\bcopy-code\b[^"]*"[^>]*>.*?<\/button>/ig, "");
1402
 
1403
+ // Checks if the feature is supported before calling the function
1404
  if ('speechSynthesis' in window) {
1405
  doSpeechSynthesis(chatResponseText, $chatResponse);
1406
  }
1407
  }
1408
  });
1409
 
1410
+ /**
1411
+ * Cleans the string for speech synthesis by removing unwanted characters and tags.
1412
+ * @param {string} str - The string to be cleaned.
1413
+ * @returns {string} - The cleaned string.
1414
+ */
1415
  function cleanStringToSynthesis(str) {
1416
  str = str.trim()
1417
  .replace(/<[^>]*>/g, "")
 
1421
  return str;
1422
  }
1423
 
1424
+ /**
1425
+ * Cancels the ongoing speech synthesis.
1426
+ */
1427
  function cancelSpeechSynthesis() {
1428
  if (window.speechSynthesis) {
1429
  window.speechSynthesis.cancel();
1430
  }
1431
  }
1432
 
1433
+ /**
1434
+ * Performs text-to-speech synthesis for long text.
1435
+ * @param {string} longText - The long text to be synthesized.
1436
+ * @param {jQuery} chatResponse - The jQuery element representing the chat response.
1437
+ */
1438
  function doSpeechSynthesis(longText, chatResponse) {
1439
 
1440
  $("span.chat-response-highlight").each(function () {
 
1518
  speakTextParts();
1519
  }
1520
 
1521
+ /**
1522
+ * Callback function triggered when the available voices change.
1523
+ * Retrieves the available text-to-speech voices.
1524
+ */
1525
  window.speechSynthesis.onvoiceschanged = function () {
1526
  getTextToSpeechVoices();
1527
  };
1528
 
1529
+ /**
1530
+ * Displays the available voices in the console.
1531
+ */
1532
  function displayVoices() {
1533
  console.table(array_voices)
1534
  }
1535
 
1536
+ /**
1537
+ * Retrieves the available text-to-speech voices.
1538
+ */
1539
  function getTextToSpeechVoices() {
1540
  window.speechSynthesis.getVoices().forEach(function (voice) {
1541
  const voiceObj = {
 
1546
  });
1547
  }
1548
 
1549
+ /**
1550
+ * Event listener to display the item's description when the default modal is shown.
1551
+ * @param {Event} event - The event object.
1552
+ */
1553
  const myModalEl = document.getElementById('modalDefault')
1554
  myModalEl.addEventListener('show.bs.modal', event => {
1555
  $("#modalDefault .modal-body").html(array_widgets[data_index].description);
1556
  })
1557
 
1558
+ /**
1559
+ * Event listener to load the settings when the configuration modal is shown.
1560
+ * The saved settings are reloaded each time the modal is opened.
1561
+ */
1562
  const myModalConfig = document.getElementById('modalConfig')
1563
  myModalConfig.addEventListener('show.bs.modal', event => {
1564
  loadSettings(); // Load the saved settings when the configuration modal is shown
 
 
 
1565
  })
1566
 
1567
+ /**
1568
+ * Key for the localStorage item that stores the display state of the contacts column.
1569
+ */
1570
  const localStorageKey = "col-contacts-border-display";
1571
 
1572
  // Get the current display state of the div from localStorage, if it exists
 
1578
  $(".col-contacts-border").css("display", "none");
1579
  }
1580
 
1581
+ /**
1582
+ * Add the click event to toggle the display state of the div.
1583
+ */
1584
  $(".toggle_employees_list").on("click", function () {
1585
  $(".col-contacts-border").toggle();
1586
 
 
1591
  localStorage.setItem(localStorageKey, displayState);
1592
  });
1593
 
1594
+ /**
1595
+ * Toastr options for displaying notifications.
1596
+ */
1597
  toastr.options = {
1598
  "closeButton": true,
1599
  "debug": false,
 
1612
  "hideMethod": "fadeOut"
1613
  }
1614
 
1615
+ // Select the chat textarea element
1616
  const textarea = document.querySelector('#chat');
1617
+
1618
+ // Select the microphone button element
1619
  const microphoneButton = document.querySelector('#microphone-button');
1620
 
1621
+ // Initialize a variable to keep track of whether the system is transcribing speech or not
1622
  let isTranscribing = false; // Initially not transcribing
1623
 
1624
+ /**
1625
+ * Loads the speech recognition functionality if supported by the browser.
1626
+ * Initiates the speech recognition functionality and handles the start and end events, as well as the result event.
1627
+ */
1628
  function loadSpeechRecognition() {
1629
  if ('SpeechRecognition' in window || 'webkitSpeechRecognition' in window) {
1630
  recognition = new (window.SpeechRecognition || window.webkitSpeechRecognition)();
 
1650
  console.log('microphone off');
1651
  $(".btn-send-chat").attr("disabled", false);
1652
  $("#microphone-button").attr("src", "img/mic-start.svg")
1653
+ isTranscribing = false; // Mark transcription as finished
1654
  });
1655
 
1656
  microphoneButton.addEventListener('click', () => {
1657
  if (!isTranscribing) {
1658
+ // Start transcription if not already transcribing
1659
  recognition.start();
1660
  isTranscribing = true;
1661
  } else {
1662
+ // Stop transcription if already transcribing
1663
  recognition.stop();
1664
  isTranscribing = false;
1665
  }
 
1670
  }
1671
  }
1672
 
1673
+ /**
1674
+ * Generates a unique identifier (UUID) using the current timestamp and a random number.
1675
+ * @returns {string} A string representing the generated UUID.
1676
+ */
1677
  function generateUUID() {
1678
  let d = new Date().getTime();
1679
  if (typeof performance !== 'undefined' && typeof performance.now === 'function') {
 
1686
  });
1687
  }
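+ // The returned identifier is expected to look like a standard UUID string,
+ // e.g. "8f14e45f-ceea-467f-9ff0-1a2b3c4d5e6f" (value shown here is only illustrative).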
1688
 
1689
+ /**
1690
+ * Loads the data from localStorage into the form if available.
1691
+ */
1692
  function loadSettings() {
1693
  const settings = getSettings();
1694
 
1695
+ // Populate the form fields with the stored settings
1696
  $('#voiceOfPlayback').val(settings.voiceOfPlayback);
1697
  $('#microphoneLanguage').val(settings.microphoneLanguage);
1698
  $('#answersToggle').prop('checked', settings.answersToggle);
1699
  }
1700
 
1701
+ /**
1702
+ * Retrieves the user settings from localStorage or creates and saves default settings if not found.
1703
+ * @returns {object} - The user settings.
1704
+ */
1705
  function getSettings() {
1706
  let settings = '';
1707
  const textTalkSettings = localStorage.getItem('text-talk-settings');
1708
  if (textTalkSettings) {
1709
  settings = JSON.parse(textTalkSettings);
1710
  } else {
1711
+ settings = createAndSaveSettings(); // Calls the function to create and save settings if not found in localStorage
1712
  }
1713
  if(uuid == ''){
1714
  uuid = settings.id;
 
1716
  return settings;
1717
  }
1718
 
1719
+ /**
1720
+ * Creates and saves the settings in the localStorage.
1721
+ * @returns {object} - The created settings.
1722
+ */
1723
  function createAndSaveSettings() {
1724
  const settings = {
1725
  id: generateUUID(),
 
1731
  return settings;
1732
  }
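+ // A sketch of the object assumed to be persisted under the 'text-talk-settings' key,
+ // based on the fields read in loadSettings() (example values only):
+ // { "id": "<uuid>", "voiceOfPlayback": "en-US***Google US English",
+ //   "microphoneLanguage": "en-US", "answersToggle": true }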
1733
 
1734
+ // Check if the voice synthesis is supported by the browser
1735
  if ('speechSynthesis' in window) {
1736
+ // Wait for the voices to be loaded before listing them
1737
  window.speechSynthesis.onvoiceschanged = function () {
1738
+ // Get all available voices
1739
  const voices = speechSynthesis.getVoices();
1740
 
1741
+ // Filter voices that have 'en' as a prefix to identify English voices
1742
  const englishVoices = voices.filter(voice => voice.lang.startsWith('en'));
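+ // e.g. voices reporting "en-US" or "en-GB" are kept, while "pt-BR" or "es-ES" are filtered out.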
1743
 
1744
+ // Get the select element by its id
1745
  const dropdown = document.getElementById('voiceOfPlayback');
1746
 
1747
+ // Remove previous options from the dropdown
1748
  dropdown.innerHTML = '';
1749
 
1750
+ // Populate the dropdown with available English voices
1751
  englishVoices.forEach(function (voice) {
1752
  const option = document.createElement('option');
1753
  option.value = `${voice.lang}***${voice.name}`;
 
1756
  });
1757
  };
1758
  } else {
1759
+ console.error('Voice synthesis is not supported by this browser.');
1760
  }
1761
 
1762
+ // Load microphone recognition languages
1763
  if ('SpeechRecognition' in window || 'webkitSpeechRecognition' in window) {
1764
  const recognition = new (window.SpeechRecognition || window.webkitSpeechRecognition)();
1765
 
1766
+ // Get supported languages for voice recognition
1767
  const supportedLanguages = { 'en-US': 'Google US English', 'en-GB': 'Google UK English' };
1768
 
1769
+ // Get the select element by its id
1770
  const dropdown = document.getElementById('microphoneLanguage');
1771
 
1772
+ // Remove previous options from the dropdown
1773
  dropdown.innerHTML = '';
1774
 
1775
+ // Populate the dropdown with available languages for voice recognition
1776
  for (const langCode in supportedLanguages) {
1777
  if (Object.hasOwnProperty.call(supportedLanguages, langCode)) {
1778
  const langName = supportedLanguages[langCode];
 
1783
  }
1784
  }
1785
  } else {
1786
+ console.error('Voice recognition is not supported by this browser.');
1787
  }
1788
 
 
 
1789
  $(document).ready(function () {
1790
+ // Event handler for saving settings when submitting the form
1791
  $('#modal-settings-submit').click(function (event) {
1792
+ event.preventDefault(); // Prevent the form from being submitted
1793
  let settings = getSettings();
1794
  settings = {
1795
  id: settings.id,
 
1802
  $('#modalConfig').modal('hide');
1803
  });
1804
 
1805
+ // Handle character count
1806
  $('#textArea').on('input', function () {
1807
  var maxLength = 4000;
1808
  var currentLength = $(this).val().length;