Upload 2 files
- app.py +151 -0
- requirements.txt +9 -0
app.py
ADDED
@@ -0,0 +1,151 @@
import torch
from peft import PeftModel
import transformers
import gradio as gr

assert (
    "LlamaTokenizer" in transformers._import_structure["models.llama"]
), "LLaMA is now in HuggingFace's main branch.\nPlease reinstall it: pip uninstall transformers && pip install git+https://github.com/huggingface/transformers.git"
from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig

BASE_MODEL = "decapoda-research/llama-7b-hf"

tokenizer = LlamaTokenizer.from_pretrained(BASE_MODEL)

LORA_WEIGHTS = "kunishou/Japanese-Alapaca-LoRA-7b-v0"

if BASE_MODEL == "decapoda-research/llama-7b-hf":
    model_param = "7B"
elif BASE_MODEL == "decapoda-research/llama-13b-hf":
    model_param = "13B"
elif BASE_MODEL == "decapoda-research/llama-30b-hf":
    model_param = "30B"

if torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"

try:
    if torch.backends.mps.is_available():
        device = "mps"
except AttributeError:
    # Older torch builds have no torch.backends.mps.
    pass

if device == "cuda":
    # Load the base model in 8-bit on GPU 0, then attach the LoRA adapter.
    model = LlamaForCausalLM.from_pretrained(
        BASE_MODEL,
        load_in_8bit=True,
        torch_dtype=torch.float16,
        device_map={"": 0},
    )
    model = PeftModel.from_pretrained(
        model,
        LORA_WEIGHTS,
        torch_dtype=torch.float16,
        device_map={"": 0},
    )
elif device == "mps":
    model = LlamaForCausalLM.from_pretrained(
        BASE_MODEL,
        device_map={"": device},
        torch_dtype=torch.float16,
    )
    model = PeftModel.from_pretrained(
        model,
        LORA_WEIGHTS,
        device_map={"": device},
        torch_dtype=torch.float16,
    )
else:
    # CPU fallback; map all modules to the current device rather than GPU 0.
    model = LlamaForCausalLM.from_pretrained(
        BASE_MODEL,
        device_map={"": device},
        low_cpu_mem_usage=True,
    )
    model = PeftModel.from_pretrained(
        model,
        LORA_WEIGHTS,
        device_map={"": device},
    )


def generate_prompt(instruction, input=None):
    # Alpaca-style prompt template; the LoRA weights were finetuned on this format.
    if input:
        return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
### Instruction:
{instruction}
### Input:
{input}
### Response:"""
    else:
        return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request.
### Instruction:
{instruction}
### Response:"""


model.eval()
if torch.__version__ >= "2":
    model = torch.compile(model)


def evaluate(
    instruction,
    input=None,
    temperature=0.1,
    top_p=0.75,
    top_k=40,
    num_beams=4,
    max_new_tokens=256,
    **kwargs,
):
    prompt = generate_prompt(instruction, input)
    inputs = tokenizer(prompt, return_tensors="pt")
    input_ids = inputs["input_ids"].to(device)
    generation_config = GenerationConfig(
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        num_beams=num_beams,
        no_repeat_ngram_size=3,
        **kwargs,
    )
    with torch.no_grad():
        generation_output = model.generate(
            input_ids=input_ids,
            generation_config=generation_config,
            return_dict_in_generate=True,
            output_scores=True,
            max_new_tokens=max_new_tokens,
        )
    s = generation_output.sequences[0]
    output = tokenizer.decode(s)
    # Keep only the text generated after the response marker.
    return output.split("### Response:")[1].strip()


gr.Interface(
    fn=evaluate,
    inputs=[
        # Gradio passes these components to evaluate() positionally, so the
        # order (and count) must match the function signature above.
        gr.components.Textbox(
            lines=2, label="Instruction", placeholder="Tell me about alpacas."
        ),
        gr.components.Textbox(lines=2, label="Input", placeholder="none"),
        gr.components.Slider(minimum=0, maximum=1, value=0.1, label="Temperature"),
        gr.components.Slider(minimum=0, maximum=1, value=0.75, label="Top p"),
        gr.components.Slider(minimum=0, maximum=100, step=1, value=40, label="Top k"),
        gr.components.Slider(minimum=1, maximum=4, step=1, value=4, label="Beams"),
        gr.components.Slider(
            minimum=1, maximum=512, step=1, value=256, label="Max tokens"
        ),
    ],
    outputs=[
        gr.components.Textbox(
            lines=8,
            label="Output",
        )
    ],
    title=f"🦙🌲 Japanese-Alpaca-LoRA-{model_param}",
    description=f"Alpaca-LoRA is a {model_param}-parameter LLaMA model finetuned to follow instructions. It is trained on the Stanford Alpaca dataset translated into Japanese and makes use of the Hugging Face LLaMA implementation. For more information, please visit [the project's website](https://github.com/kunishou/Japanese-Alpaca-LoRA).",
).launch()
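For a quick smoke test of the inference path without the Gradio UI, `evaluate` can be called directly, for example by temporarily adding a line above the `gr.Interface(...).launch()` call. A minimal sketch (the Japanese instruction is only illustrative, and it assumes the model and LoRA weights above loaded successfully on the host):

# Hypothetical smoke test; remove before deploying the Space.
print(evaluate("アルパカについて教えてください。"))  # "Tell me about alpacas."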
requirements.txt
ADDED
@@ -0,0 +1,9 @@
datasets
loralib
sentencepiece
git+https://github.com/huggingface/transformers.git
accelerate
bitsandbytes
git+https://github.com/huggingface/peft.git
gradio
appdirs
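Note that transformers and peft are installed from their main branches, so a rebuild of the Space can silently pick up breaking upstream changes. One way to make builds reproducible is to pin each git dependency to a known-good commit; a sketch, where the SHA placeholders are hypothetical and would need to be replaced with real commit hashes:

git+https://github.com/huggingface/transformers.git@<commit-sha>
git+https://github.com/huggingface/peft.git@<commit-sha>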