Woziii committed on
Commit
b14462b
1 Parent(s): 9386df0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +123 -32
app.py CHANGED
@@ -11,21 +11,41 @@ import time
11
  # Authentification
12
  login(token=os.environ["HF_TOKEN"])
13
 
14
- # Liste des modèles et leurs langues supportées
15
- models_and_languages = {
16
- "meta-llama/Llama-2-13b-hf": ["en"],
17
- "meta-llama/Llama-2-7b-hf": ["en"],
18
- "meta-llama/Llama-2-70b-hf": ["en"],
19
- "meta-llama/Meta-Llama-3-8B": ["en"],
20
- "meta-llama/Llama-3.2-3B": ["en", "de", "fr", "it", "pt", "hi", "es", "th"],
21
- "meta-llama/Llama-3.1-8B": ["en", "de", "fr", "it", "pt", "hi", "es", "th"],
22
- "mistralai/Mistral-7B-v0.1": ["en"],
23
- "mistralai/Mixtral-8x7B-v0.1": ["en", "fr", "it", "de", "es"],
24
- "mistralai/Mistral-7B-v0.3": ["en"],
25
- "google/gemma-2-2b": ["en"],
26
- "google/gemma-2-9b": ["en"],
27
- "google/gemma-2-27b": ["en"],
28
- "croissantllm/CroissantLLMBase": ["en", "fr"]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
  }
30
 
31
  # Paramètres recommandés pour chaque modèle
@@ -50,6 +70,18 @@ model = None
50
  tokenizer = None
51
  selected_language = None
52
 
 
 
 
 
 
 
 
 
 
 
 
 
53
  def load_model(model_name, progress=gr.Progress()):
54
  global model, tokenizer
55
  try:
@@ -76,7 +108,15 @@ def load_model(model_name, progress=gr.Progress()):
76
  tokenizer.pad_token = tokenizer.eos_token
77
 
78
  progress(1.0, desc="Modèle chargé")
79
- available_languages = models_and_languages[model_name]
 
 
 
 
 
 
 
 
80
 
81
  # Mise à jour des sliders avec les valeurs recommandées
82
  params = model_parameters[model_name]
@@ -200,13 +240,24 @@ def reset():
200
  model = None
201
  tokenizer = None
202
  selected_language = None
203
- return "", 1.0, 1.0, 50, None, None, None, None, gr.Dropdown(visible=False), ""
 
 
 
 
 
 
204
 
205
  with gr.Blocks() as demo:
206
  gr.Markdown("# LLM&BIAS")
207
 
208
- with gr.Accordion("Sélection du modèle"):
209
- model_dropdown = gr.Dropdown(choices=list(models_and_languages.keys()), label="Choisissez un modèle")
 
 
 
 
 
210
  load_button = gr.Button("Charger le modèle")
211
  load_output = gr.Textbox(label="Statut du chargement")
212
  language_dropdown = gr.Dropdown(label="Choisissez une langue", visible=False)
@@ -231,18 +282,58 @@ with gr.Blocks() as demo:
231
 
232
  reset_button = gr.Button("Réinitialiser")
233
 
234
- load_button.click(load_model,
235
- inputs=[model_dropdown],
236
- outputs=[load_output, language_dropdown, temperature, top_p, top_k])
237
- language_dropdown.change(set_language, inputs=[language_dropdown], outputs=[language_output])
238
- analyze_button.click(analyze_next_token,
239
- inputs=[input_text, temperature, top_p, top_k],
240
- outputs=[next_token_probs, attention_plot, prob_plot])
241
- generate_button.click(generate_text,
242
- inputs=[input_text, temperature, top_p, top_k],
243
- outputs=[generated_text])
244
- reset_button.click(reset,
245
- outputs=[input_text, temperature, top_p, top_k, next_token_probs, attention_plot, prob_plot, generated_text, language_dropdown, language_output])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
246
 
247
  if __name__ == "__main__":
248
- demo.launch()
 
11
  # Authentification
12
  login(token=os.environ["HF_TOKEN"])
13
 
14
# Catalogue of selectable models, organised as:
#   family -> model series -> size/variant -> {"name": HF repo id, "languages": supported codes}
def _entry(repo, langs):
    # One catalogue leaf: Hugging Face repo id plus its supported language codes.
    return {"name": repo, "languages": langs}

# Language set shared by the Llama 3.1/3.2 checkpoints.
_LLAMA3_LANGS = ["en", "de", "fr", "it", "pt", "hi", "es", "th"]

models_info = {
    "Meta-llama": {
        "Llama 2": {
            "7B": _entry("meta-llama/Llama-2-7b-hf", ["en"]),
            "13B": _entry("meta-llama/Llama-2-13b-hf", ["en"]),
            "70B": _entry("meta-llama/Llama-2-70b-hf", ["en"]),
        },
        "Llama 3": {
            "8B": _entry("meta-llama/Meta-Llama-3-8B", ["en"]),
            "3.2-3B": _entry("meta-llama/Llama-3.2-3B", list(_LLAMA3_LANGS)),
            "3.1-8B": _entry("meta-llama/Llama-3.1-8B", list(_LLAMA3_LANGS)),
        },
    },
    "Mistral AI": {
        "Mistral": {
            "7B-v0.1": _entry("mistralai/Mistral-7B-v0.1", ["en"]),
            "7B-v0.3": _entry("mistralai/Mistral-7B-v0.3", ["en"]),
        },
        "Mixtral": {
            "8x7B-v0.1": _entry("mistralai/Mixtral-8x7B-v0.1", ["en", "fr", "it", "de", "es"]),
        },
    },
    "Google": {
        "Gemma": {
            "2B": _entry("google/gemma-2-2b", ["en"]),
            "9B": _entry("google/gemma-2-9b", ["en"]),
            "27B": _entry("google/gemma-2-27b", ["en"]),
        },
    },
    "CroissantLLM": {
        "CroissantLLMBase": {
            "Base": _entry("croissantllm/CroissantLLMBase", ["en", "fr"]),
        },
    },
}
50
 
51
  # Paramètres recommandés pour chaque modèle
 
70
  tokenizer = None
71
  selected_language = None
72
 
73
def update_model_type(family):
    """Refresh the model-series dropdown with the series available for *family*."""
    series = models_info[family]
    return gr.Dropdown(choices=[*series], value=None, interactive=True)
75
+
76
def update_model_variation(family, model_type):
    """Refresh the variation dropdown with the sizes of the chosen model series."""
    variations = models_info[family][model_type]
    return gr.Dropdown(choices=[*variations], value=None, interactive=True)
78
+
79
def update_selected_model(family, model_type, variation):
    """Resolve the three-level selection to a model name and a language dropdown.

    Returns a ``(model_name, language_dropdown)`` pair. While any of the three
    selections is still unset, the name is empty and the dropdown stays hidden.
    """
    if family and model_type and variation:
        # Hoist the triple dict lookup once instead of repeating it per field.
        info = models_info[family][model_type][variation]
        languages = info["languages"]
        return info["name"], gr.Dropdown(
            choices=languages,
            value=languages[0],  # default to the first (primary) language
            visible=True,
            interactive=True,
        )
    return "", gr.Dropdown(visible=False)
84
+
85
  def load_model(model_name, progress=gr.Progress()):
86
  global model, tokenizer
87
  try:
 
108
  tokenizer.pad_token = tokenizer.eos_token
109
 
110
  progress(1.0, desc="Modèle chargé")
111
+
112
+ # Recherche des langues disponibles pour le modèle sélectionné
113
+ available_languages = next(
114
+ (info["languages"] for family in models_info.values()
115
+ for model_type in family.values()
116
+ for variation in model_type.values()
117
+ if variation["name"] == model_name),
118
+ ["en"] # Défaut à l'anglais si non trouvé
119
+ )
120
 
121
  # Mise à jour des sliders avec les valeurs recommandées
122
  params = model_parameters[model_name]
 
240
  model = None
241
  tokenizer = None
242
  selected_language = None
243
+ return (
244
+ "", 1.0, 1.0, 50, None, None, None, None,
245
+ gr.Dropdown(choices=list(models_info.keys()), value=None, interactive=True),
246
+ gr.Dropdown(choices=[], value=None, interactive=False),
247
+ gr.Dropdown(choices=[], value=None, interactive=False),
248
+ "", gr.Dropdown(visible=False), ""
249
+ )
250
 
251
  with gr.Blocks() as demo:
252
  gr.Markdown("# LLM&BIAS")
253
 
254
+ with gr.Accordion("Sélection du modèle", open=True):
255
+ with gr.Row():
256
+ model_family = gr.Dropdown(choices=list(models_info.keys()), label="Famille de modèle", interactive=True)
257
+ model_type = gr.Dropdown(choices=[], label="Type de modèle", interactive=False)
258
+ model_variation = gr.Dropdown(choices=[], label="Variation du modèle", interactive=False)
259
+
260
+ selected_model = gr.Textbox(label="Modèle sélectionné", interactive=False)
261
  load_button = gr.Button("Charger le modèle")
262
  load_output = gr.Textbox(label="Statut du chargement")
263
  language_dropdown = gr.Dropdown(label="Choisissez une langue", visible=False)
 
282
 
283
  reset_button = gr.Button("Réinitialiser")
284
 
285
+ # Événements pour la sélection du modèle
286
+ model_family.change(
287
+ update_model_type,
288
+ inputs=[model_family],
289
+ outputs=[model_type]
290
+ )
291
+
292
+ model_type.change(
293
+ update_model_variation,
294
+ inputs=[model_family, model_type],
295
+ outputs=[model_variation]
296
+ )
297
+
298
+ model_variation.change(
299
+ update_selected_model,
300
+ inputs=[model_family, model_type, model_variation],
301
+ outputs=[selected_model, language_dropdown]
302
+ )
303
+
304
+ load_button.click(
305
+ load_model,
306
+ inputs=[selected_model],
307
+ outputs=[load_output, language_dropdown, temperature, top_p, top_k]
308
+ )
309
+
310
+ language_dropdown.change(
311
+ set_language,
312
+ inputs=[language_dropdown],
313
+ outputs=[language_output]
314
+ )
315
+
316
+ analyze_button.click(
317
+ analyze_next_token,
318
+ inputs=[input_text, temperature, top_p, top_k],
319
+ outputs=[next_token_probs, attention_plot, prob_plot]
320
+ )
321
+
322
+ generate_button.click(
323
+ generate_text,
324
+ inputs=[input_text, temperature, top_p, top_k],
325
+ outputs=[generated_text]
326
+ )
327
+
328
+ reset_button.click(
329
+ reset,
330
+ outputs=[
331
+ input_text, temperature, top_p, top_k,
332
+ next_token_probs, attention_plot, prob_plot, generated_text,
333
+ model_family, model_type, model_variation,
334
+ selected_model, language_dropdown, language_output
335
+ ]
336
+ )
337
 
338
  if __name__ == "__main__":
339
+ demo.launch()