# reddit-dadjokes / src/parse.py
# (initial commit 8caeaa7, author: shuttie)
from transformers import (
pipeline,
)
import torch
import json
from transformers.pipelines.pt_utils import KeyDataset
from tqdm import tqdm
from datasets import Dataset
from argparse import ArgumentParser
from typing import Dict, List
# Reddit CSV dump parser script
def make_prompt_mistral(data: Dict[str, List]) -> Dict[str, List]:
    """Build Alpaca/Mistral-style instruction prompts from a batch of jokes.

    Shaped for ``datasets.Dataset.map(batched=True)``: takes a dict of
    columns and returns a new ``"prompt"`` column of the same length.

    :param data: batch dict with a ``"joke"`` column (list of joke strings)
    :return: dict with a ``"prompt"`` column of formatted prompt strings
    """
    prompt_template = "### Instruction:\n{instruct}:\n\n### Input:\n{input}\n\n### Response:\n"
    # Instruction text is loop-invariant — build it once.
    # Fixed typo: "sencence" -> "sentence" (the model reads this text verbatim).
    instruct = (
        "For the following joke, add a | separator between intro part and the punchline. "
        "Do not change the sentence, only add a separator. "
        "Full sentence should be considered a punchline. "
        "A question is a full intro section, everything following a question must be considered punchline. "
        "Do not repeat the punchline, do not change words in the sentence."
    )
    return {
        "prompt": [
            prompt_template.format(instruct=instruct, input=doc)
            for doc in data["joke"]
        ]
    }
def make_prompt_llama3(data: Dict[str, List]) -> Dict[str, List]:
    """Build Llama-3 chat-template prompts from a batch of jokes.

    Shaped for ``datasets.Dataset.map(batched=True)``: takes a dict of
    columns and returns a new ``"prompt"`` column of the same length.
    Jokes are truncated to 256 characters to bound the prompt length.

    :param data: batch dict with a ``"joke"`` column (list of joke strings)
    :return: dict with a ``"prompt"`` column of formatted prompt strings
    """
    prompt_template = "<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{instruct}<|eot_id|>\n<|start_header_id|>user<|end_header_id|>\n\n{input}<|eot_id|>\n<|start_header_id|>assistant<|end_header_id|>\n\n"
    # Instruction text is loop-invariant — build it once.
    # Fixes: "sencence" -> "sentence"; added the missing space that fused
    # "...only output the result." and "Do not tell..." into one run-on token.
    instruct = (
        "For the following joke, add a | separator between intro part and the punchline. "
        "Do not change the sentence, only add a separator. "
        "Full sentence should be considered a punchline. "
        "A question is a full intro section, everything following a question must be considered punchline. "
        "Do not repeat the punchline, do not change words in the sentence. "
        "Do not repeat this instruction, only output the result. "
        "Do not tell you're an assistant."
    )
    return {
        "prompt": [
            prompt_template.format(instruct=instruct, input=doc[:256])
            for doc in data["joke"]
        ]
    }
if __name__ == "__main__":
    # CLI entry point: read a reddit CSV dump of jokes, ask an LLM to insert
    # a "|" separator between a joke's intro and its punchline, and write the
    # accepted (intro, punchline) pairs to a JSONL file.
    parser = ArgumentParser(prog="batch_split", description="dadjokes")
    parser.add_argument("--data", action="store", help="path to reddit.csv", required=True)
    parser.add_argument("--out", action="store", help="path to out file", required=True)
    args = parser.parse_args()
    print(args)
    # NOTE(review): presumably split="train" is a no-op for Dataset.from_csv
    # (it is a load_dataset-style kwarg) — verify against the datasets version used.
    dataset = Dataset.from_csv(args.data, split="train")
    generator = pipeline(
        task="text-generation",
        # model="mistralai/Mistral-7B-Instruct-v0.3",
        model="meta-llama/Meta-Llama-3-8B-Instruct",
        torch_dtype=torch.bfloat16,  # half-precision weights to fit the 8B model
        device_map="auto",  # let accelerate place layers on available devices
    )
    # Set a pad token so batched generation works; 128009 is presumably
    # Llama-3's <|eot_id|> special token id — TODO confirm against the tokenizer.
    generator.tokenizer.pad_token_id = 128009
    # Materialize one prompt per joke, then wrap as a KeyDataset so the
    # pipeline can stream the "prompt" column in batches.
    prompts = KeyDataset(dataset.map(function=make_prompt_llama3, batched=True), "prompt")
    with open(args.out, "w") as f:
        for result in tqdm(
            generator(
                prompts,
                return_full_text=False,  # keep only the completion, not the prompt
                max_new_tokens=128,
                num_return_sequences=1,
                batch_size=24,
            ),
            total=len(prompts),
        ):
            # Keep only the last output line: earlier lines (if any) are
            # treated as model preamble and discarded.
            raw_text = result[0]["generated_text"].split("\n")
            joke = raw_text[-1]
            if "|" in joke:
                tokens = joke.split("|")
                # Accept only a clean single split with non-trivial halves;
                # anything else is dropped as a bad generation.
                if len(tokens) == 2:
                    intro = tokens[0].strip()
                    punch = tokens[1].strip()
                    if len(intro) > 10 and len(punch) > 5:
                        # One JSON object per line: {"input": intro, "output": punchline}
                        f.write(json.dumps({"input": intro, "output": punch}) + "\n")