import os

import gradio as gr
import matplotlib.pyplot as plt
import torch
from huggingface_hub import login
from transformers import AutoModelForCausalLM, AutoTokenizer

# Authenticate with the Hugging Face Hub (HF_TOKEN must be set in the environment)
login(token=os.environ["HF_TOKEN"])

# Available models
models = [
    "meta-llama/Llama-2-13b-hf",
    "meta-llama/Llama-2-7b-hf",
    "meta-llama/Llama-2-70b-hf",
    "meta-llama/Meta-Llama-3-8B",
    "meta-llama/Llama-3.2-3B",
    "meta-llama/Llama-3.1-8B",
    "mistralai/Mistral-7B-v0.1",
    "mistralai/Mixtral-8x7B-v0.1",
    "mistralai/Mistral-7B-v0.3",
    "google/gemma-2-2b",
    "google/gemma-2-9b",
    "google/gemma-2-27b",
    "croissantllm/CroissantLLMBase",
]

# Global state shared across the Gradio callbacks
model = None
tokenizer = None


def load_model(model_name):
    global model, tokenizer
    try:
        tokenizer = AutoTokenizer.from_pretrained(model_name)
        model = AutoModelForCausalLM.from_pretrained(
            model_name,
            torch_dtype=torch.bfloat16,
            device_map="auto",
            # "eager" attention is required so the forward pass can return attention weights
            attn_implementation="eager",
        )
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token
        return f"Model {model_name} loaded successfully."
    except Exception as e:
        return f"Error loading model: {str(e)}"


def analyze_next_token(input_text, temperature, top_p, top_k):
    global model, tokenizer

    if model is None or tokenizer is None:
        return "Please load a model first.", None, None

    inputs = tokenizer(
        input_text, return_tensors="pt", padding=True, truncation=True, max_length=512
    ).to(model.device)

    try:
        with torch.no_grad():
            # output_attentions=True is needed, otherwise outputs.attentions is None
            outputs = model(**inputs, output_attentions=True)

        # Distribution over the vocabulary for the next token
        last_token_logits = outputs.logits[0, -1, :]
        probabilities = torch.nn.functional.softmax(last_token_logits, dim=-1)

        # Keep the 5 most probable next tokens
        top_probs, top_indices = torch.topk(probabilities, 5)
        top_words = [tokenizer.decode([idx.item()]) for idx in top_indices]
        prob_data = {word: prob.item() for word, prob in zip(top_words, top_probs)}
        prob_plot = plot_probabilities(prob_data)

        if outputs.attentions is not None:
            # Stack to (layers, batch, heads, seq, seq), then average over layers,
            # batch and heads to get one (seq, seq) map. .float() is required
            # because numpy cannot represent bfloat16.
            attention_data = (
                torch.stack(outputs.attentions).mean(dim=(0, 1, 2)).float().cpu().numpy()
            )
            attention_plot = plot_attention(
                attention_data, tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
            )
        else:
            attention_plot = None

        text_output = "\n".join(f"{word}: {prob:.4f}" for word, prob in prob_data.items())
        return text_output, attention_plot, prob_plot
    except Exception as e:
        return f"Error during analysis: {str(e)}", None, None


def generate_text(input_text, temperature, top_p, top_k):
    global model, tokenizer

    if model is None or tokenizer is None:
        return "Please load a model first."

    inputs = tokenizer(
        input_text, return_tensors="pt", padding=True, truncation=True, max_length=512
    ).to(model.device)

    try:
        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_new_tokens=50,
                # do_sample=True is required for temperature/top_p/top_k to take effect
                do_sample=True,
                temperature=temperature,
                top_p=top_p,
                top_k=int(top_k),
                pad_token_id=tokenizer.pad_token_id,
            )
        return tokenizer.decode(outputs[0], skip_special_tokens=True)
    except Exception as e:
        return f"Error during generation: {str(e)}"


def plot_attention(attention, tokens):
    fig, ax = plt.subplots(figsize=(10, 10))
    im = ax.imshow(attention, cmap="viridis")
    ax.set_xticks(range(len(tokens)))
    ax.set_yticks(range(len(tokens)))
    ax.set_xticklabels(tokens, rotation=90)
    ax.set_yticklabels(tokens)
    fig.colorbar(im, ax=ax)
    ax.set_title("Attention map")
    plt.tight_layout()
    return fig


def plot_probabilities(prob_data):
    words = list(prob_data.keys())
    probs = list(prob_data.values())
    fig, ax = plt.subplots(figsize=(10, 5))
    ax.bar(words, probs)
    ax.set_title("Most probable next tokens")
    ax.set_xlabel("Tokens")
    ax.set_ylabel("Probability")
    plt.xticks(rotation=45)
    plt.tight_layout()
    return fig


def reset():
    global model, tokenizer
    model = None
    tokenizer = None
    # Reset the input text, the three sliders, and all output components
    return "", 1.0, 1.0, 50, None, None, None, None


with gr.Blocks() as demo:
    gr.Markdown("# Text analysis and generation")

    with gr.Accordion("Model selection"):
        model_dropdown = gr.Dropdown(choices=models, label="Choose a model")
        load_button = gr.Button("Load model")
        load_output = gr.Textbox(label="Loading status")

    with gr.Row():
        temperature = gr.Slider(0.1, 2.0, value=1.0, label="Temperature")
        top_p = gr.Slider(0.1, 1.0, value=1.0, label="Top-p")
        top_k = gr.Slider(1, 100, value=50, step=1, label="Top-k")

    input_text = gr.Textbox(label="Input text", lines=3)
    analyze_button = gr.Button("Analyze next token")
    generate_button = gr.Button("Generate continuation")

    next_token_probs = gr.Textbox(label="Next-token probabilities")

    with gr.Row():
        attention_plot = gr.Plot(label="Attention visualization")
        prob_plot = gr.Plot(label="Next-token probabilities")

    generated_text = gr.Textbox(label="Generated text", lines=5)
    reset_button = gr.Button("Reset")

    load_button.click(load_model, inputs=[model_dropdown], outputs=[load_output])
    analyze_button.click(
        analyze_next_token,
        inputs=[input_text, temperature, top_p, top_k],
        outputs=[next_token_probs, attention_plot, prob_plot],
    )
    generate_button.click(
        generate_text,
        inputs=[input_text, temperature, top_p, top_k],
        outputs=[generated_text],
    )
    reset_button.click(
        reset,
        outputs=[
            input_text,
            temperature,
            top_k,
            top_p,
            next_token_probs,
            attention_plot,
            prob_plot,
            generated_text,
        ],
    )

if __name__ == "__main__":
    demo.launch()