from huggingface_hub import InferenceClient
import gradio as gr
from deep_translator import GoogleTranslator

# Initialize the Hugging Face Inference Client for the Mixtral-8x7B-Instruct model
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
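# Note: the hosted Inference API may require authentication; if so, pass a
# token explicitly, e.g. InferenceClient(..., token=os.environ["HF_TOKEN"]).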

# Function to translate Arabic text to English
def translate_to_english(text):
    return GoogleTranslator(source='arabic', target='english').translate(text)

# Function to translate English text to Arabic
def translate_to_arabic(text):
    return GoogleTranslator(source='english', target='arabic').translate(text)
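
# Example usage (exact output may vary, since Google Translate is a live service):
#   translate_to_english("مرحبا")   # -> roughly "Hello"
#   translate_to_arabic("Hello")    # -> roughly "مرحبا"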

# Function to format the prompt with conversation history
def format_prompt(message, history):
    prompt = "<s>"
    for user_prompt, bot_response in history:
        prompt += f"[INST] {user_prompt} [/INST]"
        prompt += f" {bot_response}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
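
# For example, format_prompt("How are you?", [("Hello", "Hi!")]) yields the
# Mixtral instruct format:
#   <s>[INST] Hello [/INST] Hi!</s> [INST] How are you? [/INST]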

# The main function to generate responses
def generate(prompt, history, temperature=0.1, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)
    
    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=42,
    )
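    # seed=42 makes sampling reproducible across calls; remove it if varied
    # responses to identical prompts are preferred.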
    
    # Translate the Arabic prompt to English before sending it to the model
    english_prompt = translate_to_english(prompt)

    # history may be None on the first call; normalize it to an empty list
    history = history or []
    formatted_prompt = format_prompt(english_prompt, history)
    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
    output = ""
    
    for response in stream:
        output += response.token.text
        # Translate the English response back to Arabic before yielding it
        arabic_output = translate_to_arabic(output)
        yield arabic_output
    return arabic_output

# Additional input widgets for controlling the generation parameters
additional_inputs = [
    gr.Slider(
        label="Temperature",
        value=0.9,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values produce more diverse outputs",
    ),
    gr.Slider(
        label="Max new tokens",
        value=256,
        minimum=0,
        maximum=1024,
        step=64,
        interactive=True,
        info="The maximum numbers of new tokens",
    ),
    gr.Slider(
        label="Top-p (nucleus sampling)",
        value=0.90,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values sample more low-probability tokens",
    ),
    gr.Slider(
        label="Repetition penalty",
        value=1.2,
        minimum=1.0,
        maximum=2.0,
        step=0.05,
        interactive=True,
        info="Penalize repeated tokens",
    )
]

# Creating and launching the Gradio interface
gr.Interface(
    fn=generate,
    inputs=[
        gr.Textbox(lines=2, label="Your Prompt in Arabic"),
        gr.State([]),  # conversation history as a list of (user, bot) pairs
        *additional_inputs
    ],
    outputs=gr.Textbox(label="Generated Response in Arabic"),
    title="Try Arabic Misteral",
    description="Interact with an advanced AI model in Arabic. Adjust the settings below to tailor the responses. Your prompts will be translated to English, processed by the AI, and the response will be translated back to Arabic."
).launch(show_api=True)
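
# To run locally (assuming this file is saved as app.py):
#   pip install gradio huggingface_hub deep-translator
#   python app.py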