Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -9,21 +9,22 @@ hf_token = os.getenv("HF_TOKEN")
|
|
9 |
if hf_token is None:
|
10 |
raise ValueError("HF_TOKEN is not set. Please check your secrets.")
|
11 |
|
12 |
-
#
|
13 |
-
base_model_name = "
|
14 |
lora_model_name = "TooKeen/neo-blockchain-assistant"
|
15 |
|
16 |
-
|
17 |
-
|
|
|
18 |
model = PeftModel.from_pretrained(base_model, lora_model_name)
|
19 |
|
20 |
-
# Definiere die
|
21 |
def generate_text(prompt):
    """Generate a text completion for *prompt* with the LoRA-adapted model.

    The prompt is tokenized, moved to GPU when one is available, and the
    model's first generated sequence is decoded back to a plain string.
    """
    # Pick the device once; tensors and model are expected to agree on it.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    encoded = tokenizer(prompt, return_tensors="pt").to(device)
    # max_length=100 bounds the total sequence length (prompt included).
    generated = model.generate(**encoded, max_length=100)
    return tokenizer.decode(generated[0], skip_special_tokens=True)
|
25 |
|
26 |
-
#
|
27 |
interface = gr.Interface(
|
28 |
fn=generate_text,
|
29 |
inputs=gr.Textbox(lines=2, placeholder="Geben Sie Ihren Text hier ein..."),
|
|
|
if hf_token is None:
    raise ValueError("HF_TOKEN is not set. Please check your secrets.")

# Identifiers for the base model and the LoRA adapter stacked on top of it.
base_model_name = "mistralai/Mistral-7B-Instruct-v0.2"
lora_model_name = "TooKeen/neo-blockchain-assistant"

# Load tokenizer and base weights using the HF auth token; device_map="auto"
# defers layer placement to the loading machinery.
tokenizer = AutoTokenizer.from_pretrained(base_model_name, token=hf_token)
base_model = AutoModelForCausalLM.from_pretrained(base_model_name, token=hf_token, device_map="auto")

# Attach the LoRA adapter to the loaded base model.
model = PeftModel.from_pretrained(base_model, lora_model_name)
# Text-generation helper that backs the Gradio interface below.
def generate_text(prompt):
    """Return the model's decoded completion for the given prompt string."""
    target = "cuda" if torch.cuda.is_available() else "cpu"
    # Tokenize on CPU, then move the whole batch to the chosen device.
    batch = tokenizer(prompt, return_tensors="pt").to(target)
    output_ids = model.generate(**batch, max_length=100)
    # Only one sequence is generated; strip special tokens when decoding.
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)
|
26 |
|
27 |
+
# Gradio-Oberfläche einrichten
|
28 |
interface = gr.Interface(
|
29 |
fn=generate_text,
|
30 |
inputs=gr.Textbox(lines=2, placeholder="Geben Sie Ihren Text hier ein..."),
|