v2ray committed
Commit 6d97d06 · 1 Parent(s): bfae27f

Fixed HTML escape and added min message count.

boards/a.json CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c44fca4fcb9cf1d836842d2fc2bb22d80efc206cb6b8cf2492860c4eb8c673dd
- size 32702059
+ oid sha256:0a5fc73eb9f1486b67e5898a19b7c179429a65a32ab32859fe2667f3d1bcc187
+ size 32697664
boards/b.json CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:32d5459b1e5a7e3fe3eb91245d5cbad73d3544c30127a9e333f65a60c160009e
- size 13258810
+ oid sha256:5234debf175abd4357041759b56660adc48251fb862c0ce872b32768da89cc73
+ size 13251396
boards/bant.json CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6e9950e6061272de947b61837e97d0e5506a3fa9660501f036a275354d58db31
- size 7206919
+ oid sha256:99908c0da76a7e800f1eb6b9755cbf0e7ec633f70dba6d77ac1cbbb1f07b50c6
+ size 7204467
boards/biz.json CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:5817fa6d8e08ea64622f09b936c37ed7a9798579cd417227de569a8962b207f6
- size 17485190
+ oid sha256:37c97ed0aac7febcb871ef552d2729c5a7f1e1cd841487e12204083f179afccc
+ size 17479781
boards/ck.json CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:7a2a43dc54fcd058209fe3c6b9d5a16cf4f5b4912e9ca5ffb91019d09b571342
- size 9457996
+ oid sha256:6245fb53c575feaf581a6f428a7e167c860e6da30a9d938cdc9d6cff870ebf01
+ size 9456185
boards/co.json CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:17a525f7b19ae9e82453a42c83e7f579d0a6fce94580257a4c8991e669b99fb7
- size 26061648
+ oid sha256:d4fff3f87e831c1ecd36299ac3b0f0d1488dfe6451c35976417b9bf6ffdb12df
+ size 26055247
boards/diy.json CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c96e3cb8de222451a27f2f2eed15ac206584ed5b5e9739acd76370e243c1078c
- size 4432034
+ oid sha256:c68cd232e94751e56ebab7424c2d35fce55f629c58f095a0c44629492fe34b91
+ size 4430720
boards/fit.json CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e447e110a14b72a15047ceb66c9557246a0dbde65b8888da23a084e75b7dd1f0
- size 17070863
+ oid sha256:31b0d8ff2e5bec15dbbaa28e48c324efe6f826c1554de7198900ab6e9f7f4875
+ size 17067876
boards/g.json CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c8c2564294dceb6871555544ad4508b5e601bba6f11a29dbdd7252c6f91af188
- size 41173302
+ oid sha256:1d0889dd91cd5b37bd51ecc3492d68c462f9afc59cffecab2792aefb0d0fdefb
+ size 41156888
boards/his.json CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e5d65724c80f78b7dfa9592e2f8e2f340118eae9b840ec1cbbc1c0100aab94aa
- size 14338382
+ oid sha256:355d21e52eabdb8cadacc84e7388bc03a57cc6064200f696de4b9955ba7996ea
+ size 14336670
boards/int.json CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1cae8ce5bcd5203ef02f55c466cc8672d97f511b142b4242a5f50310fecdee8d
- size 46113932
+ oid sha256:1a7887ec1e83bbb2d17c2326f07a68e1182912215ed8a9c38b1f8a6e9a0e3798
+ size 46106098
boards/jp.json CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2616854cf7bdc5221ccadc162ba8f6419e75671f9f58431a41265654a160a387
- size 12655890
+ oid sha256:f666c7857968b9c011bd3285e6a6a6f9914bfed797df6e185e009f4e7a977e99
+ size 12652996
boards/k.json CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f8fd69f1ff11afd0d45be29ca112c712e71506e9fb4571828f62d6b3182a6615
- size 27928435
+ oid sha256:b18db8afbdb455c169bbc0a35a55e9a8d864c06befa43dc401dda6bba4d7e5e4
+ size 27920419
boards/lit.json CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b3d5231671f54b680de65c540a65f0fb76e807a67aa062076f60fdac85a9c16c
- size 15027449
+ oid sha256:e03ff273a2d98889200da4d8fc026f6d45cc1fbe75dbfcb09488d90fbfebda17
+ size 15023797
boards/m.json CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:18e40d403baac0f1af9c829ab1b76d37f59b50ee7a8d347a5b4612e99c30e5cb
- size 10306791
+ oid sha256:e7f69b28c7a8a7a95d8647f52b198bae8d31172b094bf58cfe33d68a772cd424
+ size 10304278
boards/mu.json CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:60f6cff77af92d8b0bb3492dcec0fba4c0bbfb8de4136528b198ec6145ccbe83
- size 11128010
+ oid sha256:d2a833d8d62334dcc7213f810a8024f4e41f01d80055e8be8ad5699cbca53957
+ size 11121936
boards/out.json CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6bb51f66d7707f703671aa3e37c506ef6726802b6c33b800b8aab699aed4265d
- size 3764037
+ oid sha256:988645ac00f57e9783282c2f6a76c0abeff39e8f538f43733866b90ba5d2f6e7
+ size 3763284
boards/r9k.json CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f084d13ba48e508a5febfb42eff0ed5ecd6b053ed63db6e76e1e2f9ccd3b0965
- size 34360724
+ oid sha256:15258673bfe5d6704de39577dd44772b13af0dc4dc557ce7be7ac419fa85be6a
+ size 34355927
boards/sci.json CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:fb14b0f807a860ea2c8961df842dc832435e770cb59322a352a36b9dd7025885
- size 10234842
+ oid sha256:2beec036bb03134fa056e26c11b2175d0f079a34fec903b2ed7ae42eae59a3ec
+ size 10232156
boards/trash.json CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:87712a9661105e0194e340fe2bf5ae7c203511d2489ee6ad1b57cd2eb56957c7
- size 26702413
+ oid sha256:a640afc865c25f4637b0b74ad1836fa6555c77b5fce078af84dcc08d091bb853
+ size 26691642
boards/v.json CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f78976305704c7967993b989a55fea04d319e9877b4ec4e90ea43e5565c4a91b
- size 67921235
+ oid sha256:ea8578431008a35be92ea86876b2a25faea683015919eb17b03a7fa537f0344e
+ size 67911016
boards/vg.json CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bd0241b3b266238875e6d25186f16d2121108e0f4f1e5dc8216008b9f3e8db0f
- size 117231582
+ oid sha256:b56c3c055aedbe3e932c3e286781e382a9bb2860245230cfb4a50b52e7d695a2
+ size 117204636
boards/vmg.json CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:34bdcfe04fd7fcccb42019a3bbebfda224e1d82b0c60b2a8d28e0590edfc99b7
- size 6491962
+ oid sha256:93f8513f7be64aab614c76262e52e4610b0f1a816ae414c0424784b2f7370fd3
+ size 6490402
boards/vt.json CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9996c4ef914bfad0d96ef34ac3c85eea24bd76eeb58901861c5303b179e202a7
- size 28696178
+ oid sha256:d8b3340c68c7c656abad784b10c3650829e477eae292ae03de5b2070f791717f
+ size 28686830
condense.py CHANGED
@@ -2,6 +2,7 @@ import os
  import re
  import sys
  import json
+ import html
 
  MESSAGE_SPLIT_PATTERN = re.compile(r"\n--- (?=\d+ *\n)")
  REPLY_ID_PATTERN = re.compile(r">>(\d+)")
@@ -44,7 +45,7 @@ def main():
  nonlocal replace_count
  replace_count += 1
  return f">>{orig_to_norm.get(int(match.group(1)), "unknown")}"
- line = REPLY_ID_PATTERN.sub(replace_id, line)
+ line = html.unescape(REPLY_ID_PATTERN.sub(replace_id, line))
  if not message_has_content and replace_count == 0 and line.strip():
      message_has_content = True
  if j != 1:
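
For context, a minimal self-contained sketch of what the condense.py change does, assuming a sample line in which the reply markers are already literal ">>" while other characters are still HTML-escaped. The sample text and the ID mapping below are illustrative, not taken from the dataset; the reply IDs are rewritten first, then the remaining entities are decoded with the standard-library html.unescape.

import html
import re

REPLY_ID_PATTERN = re.compile(r">>(\d+)")
orig_to_norm = {570123456: 3}  # hypothetical mapping from original post IDs to normalized IDs

def replace_id(match):
    # Same idea as the nested replace_id in condense.py, minus the counters.
    return f">>{orig_to_norm.get(int(match.group(1)), 'unknown')}"

# Illustrative escaped line: greentext, an apostrophe, and an ampersand as HTML entities.
line = ">>570123456 &gt;implying that&#039;s true &amp; relevant"

before = REPLY_ID_PATTERN.sub(replace_id, line)                 # old behavior: entities left in place
after = html.unescape(REPLY_ID_PATTERN.sub(replace_id, line))   # new behavior in this commit

print(before)  # >>3 &gt;implying that&#039;s true &amp; relevant
print(after)   # >>3 >implying that's true & relevant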
merge.py CHANGED
@@ -9,6 +9,7 @@ def parse_args():
  parser = argparse.ArgumentParser(description="Merge the boards into a single train set.")
  parser.add_argument("-t", "--tokenizer-name-or-path", default="meta-llama/Llama-3.1-8B", help="The name or path for the tokenizer")
  parser.add_argument("-l", "--limit", type=int, default=4096, help="Length limit in tokens for each post")
+ parser.add_argument("-m", "--min", type=int, default=5, help="Minimum amount of message in each post")
  parser.add_argument("-i", "--id", default="<|start_header_id|>", help="Prefix token for message IDs")
  parser.add_argument("-c", "--content", default="<|end_header_id|>", help="Prefix token for message contents")
  return parser.parse_args()
@@ -21,6 +22,7 @@ def main():
  if args.content not in tokenizer.vocab:
      print(f"The message content prefix token \"{args.content}\" is not a token in \"{args.tokenizer_name_or_path}\", it will work but it's better to be a token in the tokenizer.")
  boards_dir = "boards"
+ total_token_count = 0
  with open("merged_strings_train.jsonl", "w", encoding="utf8") as output:
      for board_path in tqdm.tqdm(os.listdir(boards_dir), desc="Boards"):
          board_name, ext = os.path.splitext(board_path)
@@ -32,17 +34,22 @@ def main():
  with open(board_path, "r", encoding="utf8") as f:
      board = json.load(f)
  for post in tqdm.tqdm(board, desc="Posts"):
+     if len(post) < args.min:
+         continue
      post_content = board_name
      post_token_count = len(tokenizer.encode(post_content, add_special_tokens=False)) + 2 # Add 2 for the start of string and end of string tokens.
      for message in post:
          formatted = f"{args.id}{message["id"]}{args.content}{message["content"]}"
          formatted_token_count = len(tokenizer.encode(formatted, add_special_tokens=False))
-         post_token_count += formatted_token_count
-         if post_token_count > args.limit:
+         added_token_count = post_token_count + formatted_token_count
+         if added_token_count > args.limit:
              break
          post_content += formatted
+         post_token_count = added_token_count
      json.dump({"input": "", "output": post_content}, output, ensure_ascii=False)
      output.write("\n")
+     total_token_count += post_token_count
+ print("Merge finished, total token count:", total_token_count)
 
  if __name__ == "__main__":
      try:
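
The merge.py changes are easier to see outside the diff. Below is a minimal sketch of the new packing rules, using a whitespace token count as a stand-in for the Hugging Face tokenizer so it runs on its own; the board data, limit, and minimum values are illustrative, not the script's defaults. It shows the two behavioral changes: posts with fewer than --min messages are skipped entirely, and post_token_count is only advanced when a message is actually appended, so the total printed at the end reflects what was written rather than counting messages cut by --limit.

ID_TOKEN = "<|start_header_id|>"      # stand-in for --id
CONTENT_TOKEN = "<|end_header_id|>"   # stand-in for --content
LIMIT = 30                            # stand-in for --limit (merge.py defaults to 4096)
MIN_MESSAGES = 2                      # stand-in for --min (merge.py defaults to 5)

def count_tokens(text):
    # Stand-in for len(tokenizer.encode(text, add_special_tokens=False)).
    return len(text.split())

board_name = "g"
board = [  # illustrative posts, each a list of messages
    [{"id": 1, "content": "a lone opening message"}],  # dropped: fewer than MIN_MESSAGES
    [{"id": 1, "content": "first message"},
     {"id": 2, "content": "second message with a few more words"},
     {"id": 3, "content": "a long third message that overshoots the limit " * 4}],
]

total_token_count = 0
for post in board:
    if len(post) < MIN_MESSAGES:  # new in this commit: skip short posts
        continue
    post_content = board_name
    post_token_count = count_tokens(post_content) + 2  # + start/end of string tokens, as in merge.py
    for message in post:
        formatted = f"{ID_TOKEN}{message['id']}{CONTENT_TOKEN}{message['content']}"
        formatted_token_count = count_tokens(formatted)
        added_token_count = post_token_count + formatted_token_count
        if added_token_count > LIMIT:  # check before committing the message
            break
        post_content += formatted
        post_token_count = added_token_count  # only count messages that were kept
    total_token_count += post_token_count
    print(post_token_count, post_content)

print("Merge finished, total token count:", total_token_count)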
merged_strings_train.jsonl CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a4e9106e7892f1e37dc81737b8cd96f994e8fcdb8056a03b9bd47fe13818d488
- size 361276051
+ oid sha256:d238c40138145de7d25cce4b9bb9ec6dec74b6d0b35bcc2c9badffdda3650948
+ size 354786900