Spaces: Running on Zero

Commit • 55bc837
1 Parent(s): c34c1ce

add feedback dataset
app.py CHANGED
@@ -1,14 +1,20 @@
-import
-import transformers
-import torch
+import glob
 import json
-from transformers import AutoTokenizer
 import os
-
+import uuid
+from datetime import datetime
+from pathlib import Path
+
+import gradio as gr
 import spaces
+import torch
+import transformers
+from huggingface_hub import CommitScheduler, hf_hub_download, login
+from transformers import AutoTokenizer
 
 HF_TOKEN = os.getenv("HF_TOKEN")
 login(HF_TOKEN)
+
 # Load the model
 model_id = "meta-llama/Meta-Llama-3-8B-Instruct"
 tokenizer = AutoTokenizer.from_pretrained(model_id, add_special_tokens=True)
@@ -32,6 +38,75 @@ terminators = [
     tokenizer.convert_tokens_to_ids("<|eot_id|>"),
 ]
 
+# Set up dataset storage
+dataset_folder = Path("dataset")
+dataset_folder.mkdir(exist_ok=True)
+
+
+# Function to get the latest dataset file
+def get_latest_dataset_file():
+    if files := glob.glob(str(dataset_folder / "data_*.jsonl")):
+        return max(files, key=os.path.getctime)
+    return None
+
+
+# Check for existing dataset and create or append to it
+if latest_file := get_latest_dataset_file():
+    dataset_file = Path(latest_file)
+    print(f"Appending to existing dataset file: {dataset_file}")
+else:
+    dataset_file = dataset_folder / f"data_{uuid.uuid4()}.jsonl"
+    print(f"Creating new dataset file: {dataset_file}")
+
+# Set up CommitScheduler for dataset uploads
+repo_id = "davanstrien/magpie-preference"  # Replace with your desired dataset repo
+scheduler = CommitScheduler(
+    repo_id=repo_id,
+    repo_type="dataset",
+    folder_path=dataset_folder,
+    path_in_repo="data",
+    every=1,  # Upload every minute
+)
+
+
+# Function to download existing dataset files
+def download_existing_dataset():
+    try:
+        files = hf_hub_download(
+            repo_id=repo_id, filename="data", repo_type="dataset", recursive=True
+        )
+        for file in glob.glob(os.path.join(files, "*.jsonl")):
+            dest_file = dataset_folder / os.path.basename(file)
+            if not dest_file.exists():
+                dest_file.write_bytes(Path(file).read_bytes())
+                print(f"Downloaded existing dataset file: {dest_file}")
+    except Exception as e:
+        print(f"Error downloading existing dataset: {e}")
+
+
+# Download existing dataset files at startup
+download_existing_dataset()
+
+
+# Function to generate a session ID
+def generate_session_id():
+    return str(uuid.uuid4())
+
+
+# Function to save feedback and generated data
+def save_data(generated_input, generated_response, vote, session_id):
+    data = {
+        "timestamp": datetime.now().isoformat(),
+        "prompt": generated_input,
+        "completion": generated_response,
+        "label": vote,
+        "session_id": session_id,
+    }
+    with scheduler.lock:
+        with dataset_file.open("a") as f:
+            f.write(json.dumps(data) + "\n")
+    return "Data saved and will be uploaded to the dataset repository."
+
 
 @spaces.GPU
 def generate_instruction_response():
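For reference, each `save_data()` call in the hunk above appends one JSON object per line to `dataset/data_<uuid>.jsonl`, which `CommitScheduler` then pushes to the `davanstrien/magpie-preference` dataset repo (`every=1` is in minutes, i.e. an upload every minute). A sketch of one such record, with illustrative values rather than real data:

```python
# One JSONL record as written by save_data() (values are made up)
record = {
    "timestamp": "2024-06-24T12:00:00.000000",  # datetime.now().isoformat()
    "prompt": "Write a haiku about magpies.",   # the generated instruction
    "completion": "Black and white wings...",   # the generated response
    "label": True,                              # True = 👍, False = 👎
    "session_id": "8c6fc1b4-...",               # random UUID per session
}
```

One caveat on `download_existing_dataset()`: as far as I know, `hf_hub_download` fetches a single file and has no `recursive` parameter, so the call above would likely raise and land in the `except` branch; `snapshot_download(repo_id, repo_type="dataset", allow_patterns="data/*.jsonl")` would be the usual way to pull a folder of files.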
@@ -41,7 +116,15 @@ def generate_instruction_response():
 {extract_input}
 ```
 """
-    yield
+    yield (
+        prompt_info,
+        "",
+        "",
+        gr.update(interactive=False),
+        gr.update(interactive=False),
+        "",
+        gr.update(interactive=False),
+    )
     instruction = pipeline(
         extract_input,
         max_new_tokens=2048,
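The `extract_input` fed to `pipeline(...)` above is Magpie's "pre-query template". A minimal sketch of the trick, assuming the same Llama 3 chat format that `response_template` uses in a later hunk (the exact string is an assumption, not copied from this commit):

```python
# Ending the prompt right after the user header makes an aligned chat model
# complete the *user* turn, i.e. invent an instruction instead of answering one.
extract_input = "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n"

# instruction = pipeline(extract_input, max_new_tokens=2048, ...)
# The first line of the completion is then kept as sanitized_instruction.
```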
@@ -56,9 +139,17 @@ def generate_instruction_response():
     ].split("\n")[0]
 
     first_step = (
-
+        f"{prompt_info}### LLM generated instruction:\n\n{sanitized_instruction}"
+    )
+    yield (
+        first_step + "\n\n### Generating LLM response...",
+        sanitized_instruction,
+        "",
+        gr.update(interactive=False),
+        gr.update(interactive=False),
+        "",
+        gr.update(interactive=False),
     )
-    yield first_step + "\n\n### Generating LLM response..."
 
     response_template = f"""<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n{sanitized_instruction}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"""
 
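Each `yield` in `generate_instruction_response` now emits a 7-tuple: one value per component listed in `outputs=[...]` of `generate_btn.click(...)` in the hunk below, with `gr.update(interactive=False)` keeping the feedback buttons disabled until generation finishes. A stripped-down sketch of this generator-handler pattern (a hypothetical mini-app, not the Space's code):

```python
import gradio as gr

def stepwise():
    # Gradio streams each yielded tuple to the UI; positions map to `outputs`.
    yield "Working...", gr.update(interactive=False)  # disable button while busy
    yield "Done.", gr.update(interactive=True)        # re-enable when finished

with gr.Blocks() as demo:
    status = gr.Markdown()
    btn = gr.Button("Run")
    btn.click(stepwise, inputs=[], outputs=[status, btn])

# demo.launch()
```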
@@ -83,32 +174,119 @@ def generate_instruction_response():
 
 {sanitized_instruction}
 
-
-
 ### LLM Generated Response:
 
 {assistant_response}
 """
-    yield
+    yield (
+        final_output,
+        sanitized_instruction,
+        assistant_response,
+        gr.update(interactive=True),
+        gr.update(interactive=True),
+        "",
+        gr.update(interactive=True),
+    )
+
 
+title = """
+# 🐦‍⬛ Magpie Preference
+"""
 
-title = "Magpie Demo"
 description = """
-This
+This demo showcases **Magpie**, an innovative approach to generating high-quality data by prompting aligned LLMs with their pre-query templates.
 
-
+Unlike traditional methods, Magpie doesn't rely on prompt engineering or seed questions for generating synthetic data. Instead, it uses the prompt template of an aligned LLM to generate both a user query and an LLM response.
 
-
+As well as providing a demo for the Magpie generations, this Space also allows you to submit a preference rating for the generated data, contributing to a crowdsourced dataset.
+
+## How it works
+
+1. **Instruction Generation:** The model generates a user instruction.
+2. **💬 Response Generation:** The model generates a response to this instruction.
+3. **👍👎 User Feedback (optional):** Rate the quality of the generated content.
+4. **💾 Dataset Creation:** Feedback and generated data are saved to a Hugging Face dataset.
+
+Find the crowd-generated dataset [here](https://huggingface.co/datasets/davanstrien/magpie-preference). It's updated every minute!
+
+Learn more about Magpie in the [paper](https://huggingface.co/papers/2406.08464).
+
+> **Note:** A random session ID groups your feedback. No personal information is collected.
 """
+
 # Create the Gradio interface
-
-
-
-
-
-
-
-)
+with gr.Blocks() as iface:
+    gr.Markdown(title)
+    gr.Markdown(description)
+
+    # Add a state variable to store the session ID
+    session_id = gr.State(generate_session_id)
+
+    generated_input = gr.State("")
+    generated_response = gr.State("")
+
+    generate_btn = gr.Button("Generate Instructions Response Pair")
+
+    output = gr.Markdown(label="Generated Data")
+
+    with gr.Row():
+        thumbs_up = gr.Button("👍 Thumbs Up", interactive=False)
+        thumbs_down = gr.Button("👎 Thumbs Down", interactive=False)
+
+    feedback_output = gr.Markdown(label="Feedback Status")
+
+    def vote_and_submit(vote, input_text, response_text, session_id):
+        if input_text and response_text:
+            feedback = save_data(
+                input_text, response_text, vote == "👍 Thumbs Up", session_id
+            )
+            return (
+                feedback,
+                gr.update(interactive=False),
+                gr.update(interactive=False),
+                gr.update(interactive=True),
+            )
+        else:
+            return (
+                "Please generate data before submitting feedback.",
+                gr.update(interactive=True),
+                gr.update(interactive=True),
+                gr.update(interactive=True),
+            )
+
+    generate_btn.click(
+        generate_instruction_response,
+        inputs=[],
+        outputs=[
+            output,
+            generated_input,
+            generated_response,
+            thumbs_up,
+            thumbs_down,
+            feedback_output,
+            generate_btn,
+        ],
+    )
+    thumbs_up.click(
+        vote_and_submit,
+        inputs=[
+            gr.State("👍 Thumbs Up"),
+            generated_input,
+            generated_response,
+            session_id,
+        ],
+        outputs=[feedback_output, thumbs_up, thumbs_down, generate_btn],
+    )
+    thumbs_down.click(
+        vote_and_submit,
+        inputs=[
+            gr.State("👎 Thumbs Down"),
+            generated_input,
+            generated_response,
+            session_id,
+        ],
+        outputs=[feedback_output, thumbs_up, thumbs_down, generate_btn],
+    )
 
 # Launch the app
 iface.launch(debug=True)
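One wiring detail worth noting in the hunk above: both vote buttons reuse a single `vote_and_submit` callback, and the vote label reaches it as a constant input via `gr.State("👍 Thumbs Up")` / `gr.State("👎 Thumbs Down")`; `save_data` then stores `vote == "👍 Thumbs Up"` as the boolean `label`. A minimal sketch of that constant-via-`gr.State` pattern (hypothetical example, not the Space's code):

```python
import gradio as gr

def record_vote(vote: str) -> str:
    # `vote` arrives as the constant wrapped in gr.State below.
    return f"You voted: {vote}"

with gr.Blocks() as demo:
    out = gr.Markdown()
    up = gr.Button("👍 Thumbs Up")
    down = gr.Button("👎 Thumbs Down")
    up.click(record_vote, inputs=[gr.State("👍 Thumbs Up")], outputs=[out])
    down.click(record_vote, inputs=[gr.State("👎 Thumbs Down")], outputs=[out])

# demo.launch()
```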