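# # Gradio tab for managing Hugging Face Transformers models: list models saved
# # locally, download new ones from the Hub, and run text-generation inference.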
# import gradio as gr
# from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
# import os
# import torch
#
# # Assuming models are stored in a 'models' directory
# MODELS_DIR = "models"
#
#
# def get_local_models():
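#     # Return the names of model folders saved under MODELS_DIR, creating the
#     # directory on first use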
#     if not os.path.exists(MODELS_DIR):
#         os.makedirs(MODELS_DIR)
#     return [d for d in os.listdir(MODELS_DIR) if os.path.isdir(os.path.join(MODELS_DIR, d))]
#
#
# def download_model(model_name):
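#     # Pull the tokenizer and weights from the Hugging Face Hub, then save a
#     # local copy under MODELS_DIR so the model shows up in the dropdowns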
#     try:
#         tokenizer = AutoTokenizer.from_pretrained(model_name)
#         model = AutoModelForCausalLM.from_pretrained(model_name)
#
#         # Save the model and tokenizer
#         save_path = os.path.join(MODELS_DIR, model_name.split('/')[-1])
#         tokenizer.save_pretrained(save_path)
#         model.save_pretrained(save_path)
#
#         return f"Successfully downloaded model: {model_name}"
#     except Exception as e:
#         return f"Failed to download model: {str(e)}"
#
#
# def run_inference(model_name, prompt):
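#     # Load a previously downloaded model from MODELS_DIR and generate a
#     # completion for the given prompt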
#     try:
#         model_path = os.path.join(MODELS_DIR, model_name)
#         tokenizer = AutoTokenizer.from_pretrained(model_path)
#         model = AutoModelForCausalLM.from_pretrained(model_path)
#
#         # Use GPU if available; pipeline() moves the model itself, so pass a
#         # device index (-1 = CPU) instead of calling model.to() separately
#         device = 0 if torch.cuda.is_available() else -1
#
#         # Create a text-generation pipeline
#         text_generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device=device)
#
#         # Generate text; max_new_tokens bounds only the generated continuation,
#         # so long prompts do not trip a max_length error
#         result = text_generator(prompt, max_new_tokens=100, num_return_sequences=1)
#
#         return result[0]['generated_text']
#     except Exception as e:
#         return f"Error running inference: {str(e)}"
#
#
# def create_huggingface_tab():
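#     # Build the Gradio tab: a model list with refresh, a download form, and an
#     # inference form wired to the helpers above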
#     with gr.Tab("Hugging Face Transformers"):
#         gr.Markdown("# Hugging Face Transformers Model Management")
#
#         with gr.Row():
#             model_list = gr.Dropdown(label="Available Models", choices=get_local_models())
#             refresh_button = gr.Button("Refresh Model List")
#
#         with gr.Row():
#             new_model_name = gr.Textbox(label="Model to Download (e.g., 'gpt2' or 'EleutherAI/gpt-neo-1.3B')")
#             download_button = gr.Button("Download Model")
#
#         download_output = gr.Textbox(label="Download Status")
#
#         with gr.Row():
#             run_model = gr.Dropdown(label="Model to Run", choices=get_local_models())
#             prompt = gr.Textbox(label="Prompt")
#             run_button = gr.Button("Run Inference")
#
#         run_output = gr.Textbox(label="Model Output")
#
#         def update_model_lists():
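#             # Re-scan MODELS_DIR so both dropdowns reflect newly downloaded models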
#             models = get_local_models()
#             return gr.update(choices=models), gr.update(choices=models)
#
#         refresh_button.click(update_model_lists, outputs=[model_list, run_model])
#         download_button.click(download_model, inputs=[new_model_name], outputs=[download_output])
#         run_button.click(run_inference, inputs=[run_model, prompt], outputs=[run_output])
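#
#
# # Example wiring (a sketch, not part of the original tab code): once the code
# # above is un-commented, the tab could be mounted in a Blocks app and launched
# # like this.
# if __name__ == "__main__":
#     with gr.Blocks() as demo:
#         create_huggingface_tab()
#     demo.launch()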