---
license: apache-2.0
base_model: mistralai/Mistral-7B-Instruct-v0.3
datasets:
- flopsy1/arxivset
library_name: transformers
pipeline_tag: text-generation
tags:
- mistral
- arxflix
---
# Mistral 7B Instruct fine-tuned to produce video scripts for Arxflix

# Load model directly
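The snippet below expects the raw-format base weights and the v3 tokenizer to be available locally. A minimal download sketch using `huggingface_hub` (the local directory name is only an example; `lora.safetensors` is assumed to be fetched from this repository alongside it):

```py
from pathlib import Path
from huggingface_hub import snapshot_download

# Example target directory for the extracted base-model files.
mistral_models_path = Path.home() / "mistral_models" / "7B-Instruct-v0.3"
mistral_models_path.mkdir(parents=True, exist_ok=True)

# Download the raw-format weights and the v3 tokenizer of the base model.
snapshot_download(
    repo_id="mistralai/Mistral-7B-Instruct-v0.3",
    allow_patterns=["params.json", "consolidated.safetensors", "tokenizer.model.v3"],
    local_dir=mistral_models_path,
)
```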
```py
from mistral_inference.model import Transformer
from mistral_inference.generate import generate

from mistral_common.tokens.tokenizers.mistral import MistralTokenizer
from mistral_common.protocol.instruct.messages import UserMessage
from mistral_common.protocol.instruct.request import ChatCompletionRequest

tokenizer = MistralTokenizer.from_file("tokenizer.model.v3")  # change to the extracted tokenizer file
model = Transformer.from_folder("mistral_models/7B-Instruct-v0.3")  # change to the extracted model dir
model.load_lora("lora.safetensors")  # LoRA adapter shipped with this repository
```
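With the adapter loaded, generation follows the standard `mistral_inference` chat-completion flow. A minimal sketch (the prompt text is only a placeholder and the sampling parameters are illustrative):

```py
# Build a chat-completion request from a single user message.
completion_request = ChatCompletionRequest(
    messages=[UserMessage(content="Write a video script for this arXiv paper: ...")]
)
tokens = tokenizer.encode_chat_completion(completion_request).tokens

# Generate and decode the script.
out_tokens, _ = generate(
    [tokens],
    model,
    max_tokens=1024,
    temperature=0.7,
    eos_id=tokenizer.instruct_tokenizer.tokenizer.eos_id,
)
print(tokenizer.instruct_tokenizer.tokenizer.decode(out_tokens[0]))
```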