tamas.kiss committed on
Commit 1c68ca4
1 Parent(s): d580c4b

Update output format

Files changed (1)
  1. app.py +53 -18
app.py CHANGED
@@ -262,24 +262,45 @@ def text_to_text_generation(verbose, prompt):
     response_num = 0 if "0" in response else (1 if "1" in response else 2)
 
     def create_generation_prompt(response_num, prompt, retriever):
+        md = ""
         match response_num:
             case 0:
                 prompt = f"[INST] {prompt}\n Lets think step by step. [/INST] {start_template}"
 
             case 1:
                 if retriever == "semantic_search":
+                    question = prompt
                     retrieved_results = semantic_search(prompt)
                     prompt = f"You are a helpful kubernetes professional. [INST] Use the following documentation, if it is relevant to answer the question below. [/INST]\nDocumentation: {retrieved_results} </s>\n<s> [INST] Answer the following question: {prompt} [/INST]\nAnswer: "
+
+                    md = (
+                        f"### Step 1: Preparing prompt for additional documentation\n"
+                        f"You are a helpful kubernetes professional. [INST] Use the following documentation, if it is relevant to answer the question below. [/INST]\nDocumentation:\n"
+                        f"### Step 2: Retrieving documentation from a book.\n"
+                        f"{retrieved_results}\n"
+                        f"### Step 3: Creating full prompt given to model\n"
+                        f"You are a helpful kubernetes professional. [INST] Use the following documentation, if it is relevant to answer the question below. [/INST]\nDocumentation: [RETRIEVED_RESULTS_FROM_BOOK] [INST] Answer the following question: {question} [/INST]\nAnswer: \n"
+                    )
                 elif retriever == "google_search":
                     retrieved_results = google_search(prompt)
+                    question = prompt
                     prompt = f"You are a helpful kubernetes professional. [INST] Use the following documentation, if it is relevant to answer the question below. [/INST]\nDocumentation: {retrieved_results} </s>\n<s> [INST] Answer the following question: {prompt} [/INST]\nAnswer: "
+
+                    md = (
+                        f"### Step 1: Preparing prompt for additional documentation\n"
+                        f"You are a helpful kubernetes professional. [INST] Use the following documentation, if it is relevant to answer the question below. [/INST]\nDocumentation:\n"
+                        f"### Step 2: Retrieving documentation from Google.\n"
+                        f"{retrieved_results}\n"
+                        f"### Step 3: Creating full prompt given to model\n"
+                        f"You are a helpful kubernetes professional. [INST] Use the following documentation, if it is relevant to answer the question below. [/INST]\nDocumentation: [RETRIEVED_RESULTS_FROM_GOOGLE] [INST] Answer the following question: {question} [/INST]\nAnswer: \n"
+                    )
                 else:
                     prompt = f"[INST] Answer the following question: {prompt} [/INST]\nAnswer: "
 
             case _:
                 prompt = f"[INST] {prompt} [/INST]"
 
-        return prompt
+        return prompt, md
 
     def generate_batch(*prompts):
         tokenized_inputs = tokenizer(prompts, return_tensors="pt", padding=True).to("cuda")
@@ -307,7 +328,7 @@ def text_to_text_generation(verbose, prompt):
 
         return (prompt, answer[start:end].strip())
 
-    modes = ["Kubectl command", "Kubernetes definition", "Normal"]
+    modes = ["Kubectl command", "Kubernetes definition", "Other"]
 
     print(f'{" Query Start ":-^40}')
     print("Classified as: " + modes[response_num])
@@ -317,23 +338,29 @@ def text_to_text_generation(verbose, prompt):
 
 
     if response_num == 2:
-        prompt = create_generation_prompt(response_num, prompt, False)
+        prompt, md = create_generation_prompt(response_num, prompt, False)
         print('Prompt given to model:\n' + prompt + '\n')
         original, new = generate_batch(prompt)[0]
         prompt, response = cleanup(original, new)
         if verbose:
             return (
                 f"# 📚KubeWizard📚\n"
-                f"{modes}\n\n" f"# Prompt given to the model:\n" f"{str_to_md(prompt)}\n" f"# Model's answer:\n" f"{str_to_md(response)}\n"
+                f"# Classified your prompt as:\n"
+                f"{modes}\n\n"
+                f"# Prompt given to the model:\n"
+                f"{str_to_md(prompt)}\n"
+                f"# Model's answer:\n" f"{str_to_md(response)}\n"
             )
         else:
             return (
                 f"# 📚KubeWizard📚\n"
-                f"{modes}\n\n" f"# Answer:\n" f"{str_to_md(response)}"
+                f"# Classified your prompt as:\n"
+                f"{modes}\n\n"
+                f"# Answer:\n" f"{str_to_md(response)}"
             )
 
     if response_num == 0:
-        prompt = create_generation_prompt(response_num, prompt, False)
+        prompt, md = create_generation_prompt(response_num, prompt, False)
         print('Prompt given to model:\n' + prompt + '\n')
         original, new = generate_batch(prompt)[0]
         prompt, response = cleanup(original, new)
@@ -341,6 +368,7 @@ def text_to_text_generation(verbose, prompt):
         if verbose:
             return (
                 f"# 📚KubeWizard📚\n"
+                f"# Classified your prompt as:\n"
                 f"{modes}\n\n"
                 f"# Prompt given to the model:\n"
                 f"{str_to_md(prompt)}\n"
@@ -352,14 +380,16 @@ def text_to_text_generation(verbose, prompt):
         else:
             return (
                 f"# 📚KubeWizard📚\n"
-                f"{modes}\n\n" f"# Answer:\n" f"```bash\n{str_to_md(response)}\n```\n"
+                f"# Classified your prompt as:\n"
+                f"{modes}\n\n"
+                f"# Answer:\n" f"```bash\n{str_to_md(response)}\n```\n"
            )
 
-    res_prompt = create_generation_prompt(response_num, prompt, False)
+    res_prompt, res_md = create_generation_prompt(response_num, prompt, False)
     print(f'Prompt given to finetuned model:\n{res_prompt}\n')
-    res_semantic_search_prompt = create_generation_prompt(response_num, prompt, "semantic_search")
+    res_semantic_search_prompt, res_semantic_search_md = create_generation_prompt(response_num, prompt, "semantic_search")
     print(f'Prompt given to model with RAG:\n{res_semantic_search_prompt}\n')
-    res_google_search_prompt = create_generation_prompt(response_num, prompt, "google_search")
+    res_google_search_prompt, res_google_search_md = create_generation_prompt(response_num, prompt, "google_search")
     print(f'Prompt given to model with Google search:\n{res_google_search_prompt}\n')
 
     gen_normal, gen_semantic_search, gen_google_search = generate_batch(
@@ -373,26 +403,31 @@ def text_to_text_generation(verbose, prompt):
     if verbose:
         return (
             f"# 📚KubeWizard📚\n"
+            f"# Classified your prompt as:\n"
             f"{modes}\n\n"
+            f"--------------------------------------------"
             f"# Answer with finetuned model\n"
             f"## Prompt given to the model:\n"
             f"{str_to_md(res_prompt)}\n\n"
             f"## Model's answer:\n"
             f"{str_to_md(res_normal)}\n\n"
+            f"--------------------------------------------"
             f"# Answer with RAG\n"
-            f"## Prompt given to the model:\n"
-            f"{str_to_md(res_semantic_search_prompt)}\n\n"
-            f"## Model's answer:\n"
-            f"{str_to_md(res_semantic_search)}\n\n"
+            f"## Section 1: Preparing for generation\n"
+            f"{res_semantic_search_md}\n"
+            f"## Section 2: Generating answer\n"
+            f"{str_to_md(res_semantic_search)}\n"
+            f"--------------------------------------------"
             f"# Answer with Google search\n"
-            f"## Prompt given to the model:\n"
-            f"{str_to_md(res_google_search_prompt)}\n\n"
-            f"## Model's answer:\n"
-            f"{str_to_md(res_google_search)}\n\n"
+            f"## Section 1: Preparing for generation\n"
+            f"{res_google_search_md}\n"
+            f"## Section 2: Generating answer\n"
+            f"{str_to_md(res_google_search)}\n"
         )
     else:
         return (
             f"# 📚KubeWizard📚\n"
+            f"# Classified your prompt as:\n"
            f"{modes}\n\n"
             f"# Answer with finetuned model\n\n {str_to_md(res_normal)}\n"
             f"# Answer with RAG\n\n {str_to_md(res_semantic_search)}\n"
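The substantive change above is that `create_generation_prompt` now returns a `(prompt, md)` tuple instead of a bare prompt: `md` is a Markdown trace of the retrieval steps, left as `""` when no retriever runs, and every call site unpacks both values. A minimal standalone sketch of that contract, with a hypothetical stub in place of the app's real `semantic_search` retriever and a shortened prompt template:

```python
# Sketch of the new (prompt, md) contract; `semantic_search` is a stub here,
# not the retriever app.py actually uses.
def semantic_search(query: str) -> str:
    return "kubectl get pods lists the pods in the current namespace."

def create_generation_prompt(response_num: int, prompt: str, retriever):
    md = ""  # Markdown trace; stays empty unless a retriever branch runs
    match response_num:
        case 1 if retriever == "semantic_search":
            question = prompt
            retrieved_results = semantic_search(prompt)
            prompt = (
                f"[INST] Use the following documentation, if it is relevant "
                f"to answer the question below. [/INST]\n"
                f"Documentation: {retrieved_results}\n"
                f"[INST] Answer the following question: {question} [/INST]\nAnswer: "
            )
            md = (
                f"### Step 1: Preparing prompt for additional documentation\n"
                f"### Step 2: Retrieving documentation from a book.\n{retrieved_results}\n"
                f"### Step 3: Creating full prompt given to model\n"
            )
        case _:
            prompt = f"[INST] {prompt} [/INST]"
    return prompt, md  # callers now unpack two values

full_prompt, trace = create_generation_prompt(1, "How do I list pods?", "semantic_search")
print(trace)
```

Within the hunks shown, the `response_num == 0` and `== 2` branches unpack `md` but never render it; only the RAG and Google-search paths feed their traces (`res_semantic_search_md`, `res_google_search_md`) into the verbose output.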
 
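Two details of the new format strings may be worth a follow-up (observations about the diff, not changes it makes): each `# Classified your prompt as:` heading is followed by `f"{modes}\n\n"`, which interpolates the whole list rather than the selected class, and the dashed separators carry no trailing `\n`, so each one concatenates onto the next `# Answer ...` heading. If one class per header and a separator on its own line are the intent, the fix would presumably look like:

```python
modes = ["Kubectl command", "Kubernetes definition", "Other"]
response_num = 0  # example classification

header = (
    f"# Classified your prompt as:\n"
    f"{modes[response_num]}\n\n"                        # index the list instead of printing it whole
    f"--------------------------------------------\n"  # trailing \n keeps the rule off the next heading
    f"# Answer with finetuned model\n"
)
print(header)
```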