Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -129,6 +129,33 @@ code_generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
|
|
# AI Assistant
# Hub client used by the assistant features below.
hf_api = HfApi()
|
131 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
132 |
def generate_app(user_idea, project_name):
|
133 |
# Extract key information from the user idea
|
134 |
# (You might want to use a more sophisticated NLP pipeline here)
|
|
|
# AI Assistant
# Hub client used by the assistant features below.
hf_api = HfApi()
|
131 |
|
132 |
def model_menu():
    """Render a sidebar model picker and load the chosen text-generation model.

    Shows a ``selectbox`` in the Streamlit sidebar, then builds a
    ``transformers`` text-generation pipeline for the selected entry.

    Returns:
        The loaded pipeline on success, or ``None`` if loading fails
        (the error is logged via ``logging.error``).
    """
    models = ["distilbert", "t5", "codellama-7b", "geminai-1.5b"]
    selected_model = st.sidebar.selectbox("Select a model:", models)

    try:
        if selected_model == "distilbert":
            model = pipeline("text-generation", model="distilbert-base-uncased")
        elif selected_model == "t5":
            model = pipeline("text-generation", model="t5-base")
        elif selected_model == "codellama-7b":
            # NOTE(review): checkpoint id "codegen-7B-mono" appears to be
            # missing an org prefix (e.g. "Salesforce/codegen-...") and does
            # not match the menu label "codellama-7b" — confirm the intended
            # hub id before relying on this branch.
            model = AutoModelForSeq2SeqLM.from_pretrained("codegen-7B-mono")
            tokenizer = AutoTokenizer.from_pretrained("codegen-7B-mono")
            model = pipeline("text-generation", model=model, tokenizer=tokenizer)
        elif selected_model == "geminai-1.5b":
            # NOTE(review): "geminai-1.5b" does not look like a valid hub
            # checkpoint id — verify before relying on this branch.
            model = AutoModelForSeq2SeqLM.from_pretrained("geminai-1.5b")
            tokenizer = AutoTokenizer.from_pretrained("geminai-1.5b")
            model = pipeline("text-generation", model=model, tokenizer=tokenizer)
        else:
            # Defensive: selectbox constrains choices, but keep the guard
            # so unexpected values fail loudly into the handler below.
            raise ValueError("Invalid model name")
        return model
    except Exception as e:
        logging.error(f"Error importing model: {e}")
        return None
    # Bug fix: removed the unreachable trailing `return selected_model` —
    # every path above returns (or raises into the except, which returns),
    # so that statement was dead code.
|
158 |
+
|
159 |
def generate_app(user_idea, project_name):
|
160 |
# Extract key information from the user idea
|
161 |
# (You might want to use a more sophisticated NLP pipeline here)
|