---
license: apache-2.0
---

### Imports

```python
import torch

from hqq.engine.hf import HQQModelForCausalLM, AutoTokenizer
from hqq.utils.patching import prepare_for_inference
```
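
These imports require the `hqq` package (installable from PyPI, e.g. `pip install hqq`) together with `transformers` and a CUDA-enabled build of `torch`; exact version requirements may differ.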

### Loading Weights

```python
# Load the pre-quantized 4-bit (group size 64) HQQ weights onto the GPU
model = HQQModelForCausalLM.from_quantized("rohitg/Mixtral-8x22B-Instruct-v0.1-hf-4bit_g64-HQQ", device='cuda')

# The tokenizer comes from the original (unquantized) model repository
tokenizer = AutoTokenizer.from_pretrained('mistralai/Mixtral-8x22B-Instruct-v0.1')

# Patch the quantized layers to use the optimized torchao int4 backend
prepare_for_inference(model, backend="torchao_int4")
```
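
For reference, a checkpoint like this one can be produced from the base model with the `hqq` library's own quantization API. The sketch below is a minimal, illustrative example (not necessarily the exact script used for this repository) and assumes enough memory to hold the full-precision Mixtral-8x22B weights; the output path is hypothetical:

```python
# Illustrative sketch: quantize the base model to 4 bits with group size 64
# using hqq's quantize_model API, then save the quantized weights.
import torch
from hqq.core.quantize import BaseQuantizeConfig
from hqq.engine.hf import HQQModelForCausalLM

base = HQQModelForCausalLM.from_pretrained('mistralai/Mixtral-8x22B-Instruct-v0.1')
quant_config = BaseQuantizeConfig(nbits=4, group_size=64)
base.quantize_model(quant_config=quant_config, compute_dtype=torch.float16, device='cuda')
base.save_quantized('Mixtral-8x22B-Instruct-v0.1-hf-4bit_g64-HQQ')  # hypothetical output directory
```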

### Text Generation

```python
# The BOS token and [INST] tags are written into the prompt by hand,
# so special-token insertion is disabled in the tokenizer call below.
prompt = "<s> [INST] How do I build a car? [/INST] "

inputs = tokenizer(prompt, return_tensors="pt", add_special_tokens=False)
outputs = model.generate(**inputs.to('cuda'), max_new_tokens=1000)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```
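
Alternatively, the instruction formatting can be delegated to the tokenizer's chat template, and tokens can be printed as they are generated. This is a sketch using the model and tokenizer loaded above; the message content is just an example:

```python
# Sketch: build the [INST] prompt via apply_chat_template and stream the
# reply to stdout token by token with transformers' TextStreamer.
from transformers import TextStreamer

messages = [{"role": "user", "content": "How do I build a car?"}]
input_ids = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to('cuda')

streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
model.generate(input_ids, max_new_tokens=1000, streamer=streamer)
```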