Trying to resolve dependencies
- .gitignore +2 -0
- app.py +7 -1
- requirements.txt +3 -1
.gitignore
ADDED
@@ -0,0 +1,2 @@
+.python-version
+venv
app.py
CHANGED
@@ -1,4 +1,5 @@
 import gradio as gr
+from peft import PeftModel, PeftTokenizer
 from transformers import TextStreamer
 
 # Load model directly
@@ -6,13 +7,18 @@ from transformers import AutoModel, AutoTokenizer
 
 """
 For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
+Info of how to use a model after training on hf https://huggingface.co/docs/trl/main/en/use_model
 """
 
-model_name_or_path = "
+model_name_or_path = "unsloth/Llama-3.2-3B-Instruct"
+adapter_name = "samlama111/lora_model"
 
 model = AutoModel.from_pretrained(model_name_or_path)
 tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
 
+model = PeftModel.from_pretrained(model, adapter_name)
+tokenizer = PeftTokenizer.from_pretrained(tokenizer, adapter_name)
+
 
 def respond(
     message,
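As committed, app.py will fail at import time: the peft package provides PeftModel but no PeftTokenizer, and AutoModel loads the base network without a generation head, which a chat Space needs. A minimal sketch of the load sequence that was presumably intended, keeping the repo's base-model and adapter IDs and loading the tokenizer from the base model (peft reuses it unchanged):

from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name_or_path = "unsloth/Llama-3.2-3B-Instruct"
adapter_name = "samlama111/lora_model"

# Load the base model with a causal-LM head, then attach the LoRA adapter weights.
model = AutoModelForCausalLM.from_pretrained(model_name_or_path)
model = PeftModel.from_pretrained(model, adapter_name)

# peft does not wrap tokenizers; the base model's tokenizer is used directly.
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)

An alternative after attaching the adapter is model.merge_and_unload(), which folds the LoRA weights into the base model so inference runs without the peft wrapper.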
requirements.txt
CHANGED
@@ -1,3 +1,5 @@
-huggingface_hub==0.25.2
+# huggingface_hub==0.25.2
+huggingface_hub
 transformers
 torch
+peft
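On the dependency side: unpinning huggingface_hub lets pip pick whichever release transformers and peft agree on (the ==0.25.2 pin is presumably what conflicted, per the commit message), and the new peft entry satisfies the import added in app.py. A quick sanity check of what the resolver actually installed; all four packages expose __version__:

import huggingface_hub
import peft
import torch
import transformers

# Print the resolved version of each dependency from requirements.txt.
for pkg in (huggingface_hub, peft, transformers, torch):
    print(pkg.__name__, pkg.__version__)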