Spaces:
Runtime error
Update app.py
app.py
CHANGED
@@ -6,9 +6,6 @@ peft_model_id = f"Bsbell21/llm_instruction_generator"
 model = AutoModelForCausalLM.from_pretrained(peft_model_id, return_dict=True, device_map='auto')
 # tokenizer = AutoTokenizer.from_pretrained(peft_model_id)
 mixtral_tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-v0.1")
-# merged_model = model.merge_and_unload()
-# Load the Lora model
-# model = PeftModel.from_pretrained(model, peft_model_id)
 
 def input_from_text(text):
     return "<s>[INST]Use the provided input to create an instruction that could have been used to generate the response with an LLM.\n" + text + "[/INST]"
@@ -21,9 +18,8 @@ def get_instruction(text):
         max_new_tokens=150,
         generation_kwargs={"repetition_penalty" : 1.7}
     )
-    # print(mixtral_tokenizer.decode(outputs[0], skip_special_tokens=True))
     print(mixtral_tokenizer.decode(outputs[0], skip_special_tokens=True))
-    return mixtral_tokenizer.decode(outputs[0], skip_special_tokens=True)
+    return mixtral_tokenizer.decode(outputs[0], skip_special_tokens=True).split("[/INST]")[1]
 
 if __name__ == "__main__":
     # make a gradio interface
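
For context, below is a plausible reconstruction of the full app.py as it would read after this commit. Only the lines visible in the diff are confirmed; the imports, the tokenize-and-generate call inside get_instruction (the diff shows its keyword arguments but not the function receiving them), and the Gradio wiring under __main__ are assumptions.

# Hypothetical reconstruction of app.py after this commit; only the lines
# shown in the diff above are confirmed, everything else is an assumption.
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer

peft_model_id = f"Bsbell21/llm_instruction_generator"

model = AutoModelForCausalLM.from_pretrained(peft_model_id, return_dict=True, device_map='auto')
# tokenizer = AutoTokenizer.from_pretrained(peft_model_id)
mixtral_tokenizer = AutoTokenizer.from_pretrained("mistralai/Mixtral-8x7B-v0.1")

def input_from_text(text):
    return "<s>[INST]Use the provided input to create an instruction that could have been used to generate the response with an LLM.\n" + text + "[/INST]"

def get_instruction(text):
    # Assumed call shape: the diff shows max_new_tokens=150 and a
    # repetition penalty of 1.7, but not the function they are passed to.
    # model.generate() accepts repetition_penalty directly, so it is
    # passed that way here rather than via a generation_kwargs dict.
    inputs = mixtral_tokenizer(input_from_text(text), return_tensors="pt").to(model.device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=150,
        repetition_penalty=1.7,
    )
    print(mixtral_tokenizer.decode(outputs[0], skip_special_tokens=True))
    return mixtral_tokenizer.decode(outputs[0], skip_special_tokens=True).split("[/INST]")[1]

if __name__ == "__main__":
    # make a gradio interface
    demo = gr.Interface(fn=get_instruction, inputs="text", outputs="text")
    demo.launch()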
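The functional change in this commit is the post-processing on the return value: decode() returns the full sequence, echoed prompt included, so splitting on "[/INST]" and taking index 1 keeps only the text the model generated after the prompt. A minimal illustration with a made-up decoded string:

# Made-up decoded output: with skip_special_tokens=True the "<s>" marker
# is dropped, but the [INST]...[/INST] prompt is still echoed back.
decoded = (
    "[INST]Use the provided input to create an instruction that could have been "
    "used to generate the response with an LLM.\n"
    "Paris is the capital of France.[/INST] What is the capital of France?"
)
# Keep only the text after the first "[/INST]" delimiter.
print(decoded.split("[/INST]")[1])  # -> " What is the capital of France?"

One caveat: if "[/INST]" were ever missing from the decoded text, indexing [1] would raise an IndexError; splitting with maxsplit=1 and taking the last element would be a more defensive variant.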