HeshamHaroon committed on
Commit
b6be744
1 Parent(s): a08e6a6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +68 -37
app.py CHANGED
@@ -2,15 +2,10 @@ from huggingface_hub import InferenceClient
2
  import gradio as gr
3
  from deep_translator import GoogleTranslator
4
 
 
5
  client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
6
-
7
def translate_to_arabic(text):
    """Translate *text* into Arabic, auto-detecting the source language."""
    return GoogleTranslator(source='auto', target='ar').translate(text)

def translate_to_english(text):
    """Translate *text* into English, auto-detecting the source language."""
    return GoogleTranslator(source='auto', target='en').translate(text)
14
 
15
  def format_prompt(message, history):
16
  prompt = "<s>"
@@ -20,39 +15,75 @@ def format_prompt(message, history):
20
  prompt += f"[INST] {message} [/INST]"
21
  return prompt
22
 
23
def generate(prompt, history=None):
    """Answer an Arabic *prompt* with Mixtral, returning Arabic text.

    The prompt is translated to English, formatted with the chat history,
    streamed through the model, and the full English answer is translated
    back to Arabic once at the end.

    Parameters:
        prompt: user query, assumed Arabic (any language is auto-detected).
        history: list of (user, bot) turns consumed by format_prompt;
            defaults to an empty history.

    Returns:
        The model's answer translated to Arabic.
    """
    # Avoid the mutable-default-argument pitfall: a shared [] would
    # accumulate state across calls.
    if history is None:
        history = []

    # Translate the Arabic prompt to English for the English-centric model.
    prompt_in_english = translate_to_english(prompt)
    formatted_prompt = format_prompt(prompt_in_english, history)

    generate_kwargs = {
        "temperature": 0.1,
        "max_new_tokens": 256,
        "top_p": 0.95,
        "repetition_penalty": 1.0,
        "do_sample": True,
        "seed": 42,  # fixed seed for reproducibility; remove for randomness
    }

    # Stream the response from the model.
    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)

    output = ""
    for response in stream:
        # With details=True the stream yields TextGenerationStreamResponse
        # objects whose text lives on `.token.text` — the original
        # `hasattr(response, 'text')` check was always False, leaving the
        # output empty. Skip special tokens (e.g. </s>) so they don't leak
        # into the translated answer.
        if hasattr(response, "token") and not response.token.special:
            output += response.token.text

    # Translate the English response back to Arabic.
    return translate_to_arabic(output)
 
49
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
50
 
51
# Build the single-textbox demo UI: Arabic in, Arabic out.
arabic_query_box = gr.Textbox(
    lines=5,
    placeholder='Type your Arabic query here...',
    label='Arabic Query',
)
iface = gr.Interface(
    fn=generate,
    inputs=[arabic_query_box],
    outputs='text',
    title="DorjGPT Arabic-English Translation Chatbot",
)

iface.launch()
 
2
  import gradio as gr
3
  from deep_translator import GoogleTranslator
4
 
5
# Initialize the InferenceClient and the translators
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
# Auto-detect the source language instead of hard-coding it: a fixed
# source='arabic' / source='english' force-interprets input that is already
# in the target language (or mixed), producing garbage translations.
translator_to_en = GoogleTranslator(source='auto', target='english')
translator_to_ar = GoogleTranslator(source='auto', target='arabic')
 
 
 
 
 
 
9
 
10
  def format_prompt(message, history):
11
  prompt = "<s>"
 
15
  prompt += f"[INST] {message} [/INST]"
16
  return prompt
17
 
18
def generate(prompt, history, temperature=0.1, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
    """Stream a model reply to an Arabic *prompt*, yielding Arabic text.

    The prompt is translated to English, sent to Mixtral, and the growing
    English output is translated back to Arabic on each step so the Gradio
    chat UI can stream the answer.

    Parameters:
        prompt: user message (assumed Arabic; source language auto-detected).
        history: chat history consumed by format_prompt.
        temperature / max_new_tokens / top_p / repetition_penalty: sampling
            knobs bound to the UI sliders.

    Yields:
        Progressively longer Arabic translations of the partial answer.
    """
    # Clamp/normalize slider values: sliders deliver floats, and the API
    # rejects temperature <= 0 and non-integer token counts.
    temperature = max(float(temperature), 1e-2)
    top_p = float(top_p)
    max_new_tokens = int(max_new_tokens)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,  # fixed seed keeps answers reproducible for identical inputs
    )

    # Translate the Arabic prompt to English
    translated_prompt = translator_to_en.translate(prompt)
    formatted_prompt = format_prompt(translated_prompt, history)

    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)

    output = ""
    for response in stream:
        # Skip special tokens (e.g. </s>) so they don't leak into the answer.
        if not response.token.special:
            output += response.token.text
            # NOTE(review): one translate call per token is expensive —
            # acceptable for a demo, consider batching for real traffic.
            yield translator_to_ar.translate(output)  # stream Arabic back
    # No trailing `return output`: a generator's return value is discarded
    # by Gradio, and the last yield already carries the full Arabic answer.
44
 
45
# Sliders exposed in the chat UI; values are passed to generate() as the
# extra keyword arguments after (prompt, history).
additional_inputs = [
    gr.Slider(
        label="Temperature",
        value=0.9,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values produce more diverse outputs",
    ),
    gr.Slider(
        label="Max new tokens",
        value=256,
        minimum=64,    # was 0 — a zero-token generation is an invalid request
        maximum=1024,  # was 1048 — presumably a typo for 1024
        step=64,
        interactive=True,
        info="The maximum numbers of new tokens",
    ),
    gr.Slider(
        label="Top-p (nucleus sampling)",
        value=0.90,
        minimum=0.0,
        maximum=1,
        step=0.05,
        interactive=True,
        info="Higher values sample more low-probability tokens",
    ),
    gr.Slider(
        label="Repetition penalty",
        value=1.2,
        minimum=1.0,
        maximum=2.0,
        step=0.05,
        interactive=True,
        info="Penalize repeated tokens",
    ),
]
83
 
84
# Chat panel: no label/share button, copy button and like buttons enabled.
chat_panel = gr.Chatbot(
    show_label=False,
    show_share_button=False,
    show_copy_button=True,
    likeable=True,
    layout="panel",
)
demo = gr.ChatInterface(
    fn=generate,
    chatbot=chat_panel,
    additional_inputs=additional_inputs,
    title="DorjGPT interface with Arabic Translation",
)
demo.launch(show_api=True)