imperialwool committed on
Commit
67288d4
1 Parent(s): 3cd9e10

Update gradio_app.py

Browse files
Files changed (1) hide show
  1. gradio_app.py +12 -9
gradio_app.py CHANGED
@@ -56,7 +56,7 @@ with open('system.prompt', 'r', encoding='utf-8') as f:
56
  prompt = f.read()
57
 
58
  def generate_answer(request: str, max_tokens: int = 256, language: str = "en", custom_prompt: str = None):
59
- print("Request:", request, "\nMax tokens:", max_tokens, "\nLanguage:", language, "\nCustom prompt:", custom_prompt, "\n")
60
  try:
61
  maxTokens = max_tokens if 16 <= max_tokens <= 256 else 64
62
  if isinstance(custom_prompt, str):
@@ -64,13 +64,13 @@ def generate_answer(request: str, max_tokens: int = 256, language: str = "en", c
64
  else:
65
  userPrompt = prompt + "\n\nUser: " + request + "\nAssistant: "
66
  except:
67
- return "Not enough data! Check that you passed all needed data."
68
 
69
  try:
70
  output = llm(userPrompt, max_tokens=maxTokens, stop=["User:"], echo=False)
71
  text = output["choices"][0]["text"]
72
  if language in languages:
73
- print("Translating from en to", language)
74
  encoded_input = translator_tokenizer(text, return_tensors="pt")
75
  generated_tokens = translator_model.generate(
76
  **encoded_input, forced_bos_token_id=translator_tokenizer.get_lang_id(language)
@@ -78,13 +78,13 @@ def generate_answer(request: str, max_tokens: int = 256, language: str = "en", c
78
  translated_text = translator_tokenizer.batch_decode(
79
  generated_tokens, skip_special_tokens=True
80
  )[0]
81
- print("Translated:", translated_text, "\nOriginal:", text)
82
- return translated_text
83
- print(text)
84
- return text
85
  except Exception as e:
86
  print(e)
87
- return "Oops! Internal server error. Check the logs of space/instance."
88
  print("\n\n\n")
89
 
90
  print("! LOAD GRADIO INTERFACE !")
@@ -96,7 +96,10 @@ demo = gr.Interface(
96
  gr.components.Dropdown(label="Target Language", value="en", choices=["en"]+languages),
97
  gr.components.Textbox(label="Custom system prompt"),
98
  ],
99
- outputs=["text"],
 
 
 
100
  title=title,
101
  description=desc,
102
  allow_flagging='never'
 
56
  prompt = f.read()
57
 
58
  def generate_answer(request: str, max_tokens: int = 256, language: str = "en", custom_prompt: str = None):
59
+ logs = f"Request: {request}\nMax tokens: {max_tokens}\nLanguage: {language}\nCustom prompt: {custom_prompt}\n"
60
  try:
61
  maxTokens = max_tokens if 16 <= max_tokens <= 256 else 64
62
  if isinstance(custom_prompt, str):
 
64
  else:
65
  userPrompt = prompt + "\n\nUser: " + request + "\nAssistant: "
66
  except:
67
+ return "Not enough data! Check that you passed all needed data.", logs
68
 
69
  try:
70
  output = llm(userPrompt, max_tokens=maxTokens, stop=["User:"], echo=False)
71
  text = output["choices"][0]["text"]
72
  if language in languages:
73
+ logs += f"\nTranslating from en to {language}"
74
  encoded_input = translator_tokenizer(text, return_tensors="pt")
75
  generated_tokens = translator_model.generate(
76
  **encoded_input, forced_bos_token_id=translator_tokenizer.get_lang_id(language)
 
78
  translated_text = translator_tokenizer.batch_decode(
79
  generated_tokens, skip_special_tokens=True
80
  )[0]
81
+ logs += f"\nTranslated: {translated_text}\nOriginal: {text}"
82
+ return translated_text, logs
83
+ logs += f"\nOriginal: {text}"
84
+ return text, logs
85
  except Exception as e:
86
  print(e)
87
+ return "Oops! Internal server error. Check the logs of space/instance.", logs
88
  print("\n\n\n")
89
 
90
  print("! LOAD GRADIO INTERFACE !")
 
96
  gr.components.Dropdown(label="Target Language", value="en", choices=["en"]+languages),
97
  gr.components.Textbox(label="Custom system prompt"),
98
  ],
99
+ outputs=[
100
+ gr.components.Textbox(label="Output"),
101
+ gr.components.Textbox(label="Logs")
102
+ ],
103
  title=title,
104
  description=desc,
105
  allow_flagging='never'