from argparse import ArgumentParser
import json
from dataclasses import dataclass
from typing import Dict, List, Set

import hnswlib
from sentence_transformers import SentenceTransformer
from tqdm import tqdm


@dataclass
class Doc:
    input: str
    output: str

    @staticmethod
    def from_json(doc: Dict) -> "Doc":
        return Doc(input=doc['input'], output=doc['output'])


if __name__ == "__main__":
    parser = ArgumentParser(prog="convert.py", description="dadjokes reddit JSONL converter")
    parser.add_argument("--data", action="store", help="path to input JSONL file", required=True)
    parser.add_argument("--out", action="store", help="path to output file", required=True)
    parser.add_argument("--inst", action="store", help="alpaca instruction", required=True)
    args = parser.parse_args()
    print(args)

    model = SentenceTransformer("intfloat/e5-base-v2", device="cuda")

    # Read one JSON document per line.
    with open(args.data, 'r') as input_file:
        docs: List[Doc] = []
        for line in tqdm(input_file.readlines()):
            docs.append(Doc.from_json(json.loads(line)))

    # E5 models expect the "passage: " prefix on documents being embedded.
    embeddings = model.encode(
        [f"passage: {doc.input} {doc.output}" for doc in docs],
        batch_size=512,
        show_progress_bar=True,
    )

    print("building index")
    # e5-base-v2 produces 768-dimensional embeddings.
    p = hnswlib.Index(space='cosine', dim=768)
    p.init_index(max_elements=len(docs), ef_construction=200, M=16)
    p.add_items(embeddings, list(range(len(docs))))

    print("computing similarity")
    # For each document, fetch its 10 nearest neighbours (the document itself included).
    labels, distances = p.knn_query(embeddings, k=10)

    print("search done, exporting")
    skips: Set[int] = set()
    dupe_count = 0
    broken_count = 0
    with open(args.out, 'w') as output:
        for index, (doc, label_list, dist_list) in enumerate(zip(docs, labels.tolist(), distances.tolist())):
            if index in skips:
                dupe_count += 1
            elif "http" in doc.output:
                # Drop documents whose output contains a link.
                broken_count += 1
            else:
                jdoc = {"input": doc.input, "output": doc.output, "instruction": args.inst}
                output.write(json.dumps(jdoc) + '\n')
            skips.add(index)
            # Cosine distance below 0.07 (similarity above 0.93) counts as a near-duplicate;
            # mark those neighbours so they are skipped when their turn comes.
            for label, dist in zip(label_list, dist_list):
                if dist < 0.07:
                    skips.add(label)

    print(f"done: dupes={dupe_count} broken={broken_count}")
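
# Example invocation (the file names and instruction string below are
# hypothetical, not taken from this repo):
#
#   python convert.py \
#       --data dadjokes.jsonl \
#       --out alpaca.jsonl \
#       --inst "Respond to the prompt with a dad joke."
#
# Each line of --data is expected to be a JSON object with "input" and
# "output" fields; each line of --out is an Alpaca-style record with
# "input", "output", and "instruction" fields.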