donfu committed on
Commit ae54b00
Parent: 03e6e28

Implement review comments from OA

Files changed (8)
  1. README.md +30 -2
  2. combine.py +5 -10
  3. download.py +23 -22
  4. merge_parquets.py +2 -14
  5. process.py +10 -10
  6. stats.md +0 -186
  7. stats.py +3 -1
  8. upload.py +1 -1
README.md CHANGED
@@ -37,8 +37,11 @@ pretty_name: Open-Assistant StackExchange Instruction
 
 This dataset is taken from https://archive.org/details/stackexchange.
 
-There's a single parquet file combining all stackexchange sites.
-The threads have been filtered as follows: only threads with an accepted answer, for which both the question and response is less than 1000 characters have been choosen. Other answers, or questions without accepted answers, or long entries have been droppped.
+There's a single parquet file combining all stackexchange sites. The threads
+have been filtered as follows: only threads with an accepted answer, where
+both the question and the response are less than 1000 characters, have been
+chosen. Other answers, questions without accepted answers, and longer entries
+have been dropped.
 
 Each row consists of
 
@@ -49,6 +52,31 @@ Each row consists of
 
 Original extraction code by https://github.com/b-mc2
 
+
+## How to Reproduce this Dataset
+
+1. Download all XML files from the stackexchange archive into the xml/ folder
+   ```
+   ./download.py
+   ```
+2. Process the XML, filter conversations and convert them to OA format in the parquet/ folder
+
+   ```
+   ./process.py
+   ```
+3. Run stats on all files in the parquet/ folder
+   ```
+   ./stats.py
+   ```
+4. Combine all parquet files into one large stackexchange.parquet file
+   ```
+   ./combine.py
+   ```
+5. Upload to the Hugging Face hub; you'll first need to log in with `huggingface-cli login`
+   ```
+   ./upload.py
+   ```
+
 ## Statistics
 
 - 3dprinting: 1,006
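For reference, the filter described in the README amounts to roughly the following — a minimal sketch, assuming a merged dataframe with the Body_q/Body_a columns seen in process.py and the standard AcceptedAnswerId field from the Posts.xml schema; the actual logic lives in process.py:

```python
import pandas as pd

MAX_LENGTH = 1000  # same threshold as in process.py

def filter_threads(df: pd.DataFrame) -> pd.DataFrame:
    # Keep only threads with an accepted answer where both the question and
    # the response stay under the length limit; everything else is dropped.
    has_accepted = df["AcceptedAnswerId"].notna()
    short_question = df["Body_q"].str.len() < MAX_LENGTH
    short_answer = df["Body_a"].str.len() < MAX_LENGTH
    return df[has_accepted & short_question & short_answer]
```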
combine.py CHANGED
@@ -1,7 +1,8 @@
 #!/usr/bin/env python3
 # Combine (and shorten) parquet files into a single file
 
-import os, glob, sys, re
+import glob
+
 import pandas as pd
 from merge_parquets import merge_parquet_dir
 
@@ -15,18 +16,12 @@ for file in glob.glob("full/*.parquet"):
     df["METADATA"] = df["METADATA"].apply(
         lambda meta: {
             "tags": meta["tags"],
-            "answer_score": int(meta["answer_score"])
-            if "answer_score" in meta and meta["answer_score"]
-            else 0,
-            "question_score": int(meta["question_score"])
-            if "question_score" in meta and meta["question_score"]
-            else 0,
+            "answer_score": int(meta["answer_score"]) if "answer_score" in meta and meta["answer_score"] else 0,
+            "question_score": int(meta["question_score"]) if "question_score" in meta and meta["question_score"] else 0,
         }
     )
     df.to_parquet(file)
     after = len(df)
-    print(
-        f"Shortened {file} from {before} to {after} rows ({100 * after / before:.2f})"
-    )
+    print(f"Shortened {file} from {before} to {after} rows ({100 * after / before:.2f})")
 
 merge_parquet_dir("full", "stackexchange.parquet")
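A note on the reflowed lambda: it normalizes METADATA so every row carries integer scores, defaulting missing or empty values to 0, which keeps the parquet schema consistent across sites. The same normalization on a plain dict (a sketch mirroring the code above):

```python
# Sketch of the METADATA normalization from combine.py, applied to one dict.
def normalize(meta: dict) -> dict:
    return {
        "tags": meta["tags"],
        "answer_score": int(meta["answer_score"]) if "answer_score" in meta and meta["answer_score"] else 0,
        "question_score": int(meta["question_score"]) if "question_score" in meta and meta["question_score"] else 0,
    }

print(normalize({"tags": "python", "answer_score": "5", "question_score": None}))
# {'tags': 'python', 'answer_score': 5, 'question_score': 0}
```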
download.py CHANGED
@@ -3,19 +3,19 @@
 # Simple script to download StackExchange archive XML files with posts (threaded version)
 #
 # Note: you probably want to download stackoverflow.com-Posts.7z manually, as it is 18GB
-# and takes a long time to download. You can try using torrent:
+# and takes days to download otherwise. You can try using the torrent:
 #
 # webtorrent https://archive.org/download/stackexchange/stackexchange_archive.torrent --select 658
 #
 
-import requests
 import concurrent.futures
 import os
-from bs4 import BeautifulSoup as bs
-import pandas as pd
 import re
 
-base_url = "https://ia600107.us.archive.org/view_archive.php?archive=/27/items/stackexchange/{0}&file=Posts.xml"
+import requests
+from bs4 import BeautifulSoup as bs
+
+BASE_URL = "https://ia600107.us.archive.org/view_archive.php?archive=/27/items/stackexchange/{0}&file=Posts.xml"
 DOWNLOAD_DIR = "xml/"
 NUM_PARALLEL = 20
 RE_IGNORE = r"_meta|stackoverflow\.com\-"
@@ -24,31 +24,26 @@ RE_IGNORE = r"_meta|stackoverflow\.com\-"
 def get_all_filenames():
     """
     Retrieve all urls from stackexchange archive.
-    This needs quite some mangling because of special cases.
+    This needs quite some mangling because of special cases (i.e. stackoverflow is not in one 7z archive).
+    Ignore meta files.
     """
     response = requests.get("https://archive.org/download/stackexchange")
     if response.ok:
         soup = bs(response.content, "html.parser")
         table = soup.find("table")
         link_tags = table.find_all("a")
-        urls = {
-            "stackoverflow": "https://archive.org/download/stackexchange/stackoverflow.com-Posts.7z"
-        }
+        urls = {"stackoverflow": "https://archive.org/download/stackexchange/stackoverflow.com-Posts.7z"}
         for link in link_tags:
             url = link["href"]
             name = url.split(".stackexchange")[0].replace(".", "_").replace("-", "_")
             name = name.replace("_com_7z", "")
             if url.endswith("7z") and not re.search(RE_IGNORE, url):
-                urls[name] = base_url.format(url)
+                urls[name] = BASE_URL.format(url)
     return urls
 
 
-urls = get_all_filenames()
-
-
 def download_url(dataset_name: str, url: str):
-    if not os.path.exists(DOWNLOAD_DIR):
-        os.mkdir(DOWNLOAD_DIR)
+    os.makedirs(DOWNLOAD_DIR, exist_ok=True)
     cache_path = os.path.join(DOWNLOAD_DIR, dataset_name + ".xml")
     if os.path.exists(cache_path):
         print("Using cached: ", cache_path)
@@ -62,12 +57,18 @@ def download_url(dataset_name: str, url: str):
     return cache_path
 
 
-with concurrent.futures.ThreadPoolExecutor(max_workers=NUM_PARALLEL) as executor:
-    futures = [
-        executor.submit(download_url, dataset, url) for dataset, url in urls.items()
-    ]
+def download_all():
+    urls = get_all_filenames()
+    with concurrent.futures.ThreadPoolExecutor(max_workers=NUM_PARALLEL) as executor:
+        futures = [executor.submit(download_url, dataset, url) for dataset, url in urls.items()]
 
-    # Wait for all downloads to complete
-    concurrent.futures.wait(futures)
+        # Wait for all downloads to complete
+        concurrent.futures.wait(futures)
+    print("All downloads complete, except for the large stackoverflow XML file")
+    print("Use torrent to download this one much quicker, then uncompress the 7z file")
+    print("and move the extracted stackoverflow.com-Posts.xml to xml/stackoverflow.xml")
+    print("webtorrent https://archive.org/download/stackexchange/stackexchange_archive.torrent --select 658")
 
-print("All downloads complete")
+
+if __name__ == "__main__":
+    download_all()
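The name mangling in get_all_filenames is easiest to see on concrete archive filenames — a small sketch of the same string operations; the last case explains the odd mathoverflow_net_7z key that shows up in the statistics:

```python
# Sketch of the filename-to-dataset-name mangling used in get_all_filenames.
def to_name(url: str) -> str:
    name = url.split(".stackexchange")[0].replace(".", "_").replace("-", "_")
    return name.replace("_com_7z", "")

print(to_name("3dprinting.stackexchange.com.7z"))  # 3dprinting
print(to_name("askubuntu.com.7z"))                 # askubuntu
print(to_name("mathoverflow.net.7z"))              # mathoverflow_net_7z (as in the Statistics list)
```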
merge_parquets.py CHANGED
@@ -34,17 +34,7 @@ def stream_from_parquets(paths: Iterable[Path]) -> Iterable[pa.Table]:
 T = TypeVar("T")
 
 
-def coalesce(
-    items: Iterable[T], max_size: int, sizer: Callable[[T], int] = len
-) -> Iterable[list[T]]:
-    """Coalesce items into chunks. Tries to maximize chunk size and not exceed max_size.
-    If an item is larger than max_size, we will always exceed max_size, so make a
-    best effort and place it in its own chunk.
-    You can supply a custom sizer function to determine the size of an item.
-    Default is len.
-    >>> list(coalesce([1, 2, 11, 4, 4, 1, 2], 10, lambda x: x))
-    [[1, 2], [11], [4, 4, 1], [2]]
-    """
+def coalesce(items: Iterable[T], max_size: int, sizer: Callable[[T], int] = len) -> Iterable[list[T]]:
     batch = []
     current_size = 0
     for item in items:
@@ -59,9 +49,7 @@ def coalesce(
     yield batch
 
 
-def coalesce_parquets(
-    paths: Iterable[Path], outpath: Path, max_size: int = 2**20
-) -> None:
+def coalesce_parquets(paths: Iterable[Path], outpath: Path, max_size: int = 2**20) -> None:
     tables = stream_from_parquets(paths)
     # Instead of coalescing using number of rows as your metric, you could
     # use pa.Table.nbytes or something.
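This commit drops the coalesce docstring along with its doctest; for the record, the behavior it documented, reproduced here as a standalone check:

```python
from merge_parquets import coalesce

# From the doctest removed by this commit: items are packed greedily into
# chunks up to max_size, and an item larger than max_size gets its own chunk.
chunks = list(coalesce([1, 2, 11, 4, 4, 1, 2], max_size=10, sizer=lambda x: x))
assert chunks == [[1, 2], [11], [4, 4, 1], [2]]
```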
process.py CHANGED
@@ -2,23 +2,24 @@
 # Simple script to convert StackExchange XML to Open Assistant format
 # Original code by https://github.com/b-mc2
 
-import os, gc, glob, sys, re
-from bs4 import BeautifulSoup as bs
+import gc
+import glob
+import os
+import re
+import subprocess
+import sys
+
 import pandas as pd
 from html2text import html2text
-from datasets import load_dataset
 from lxml import etree
 from tqdm import tqdm
-import subprocess
-from merge_parquets import merge_parquet_dir
-
 
 XML_DIR = "./xml"
 SOURCE = "stackexchange-{0}"
 MAX_ANSWERS = 10
 QUESTION_SCORE_TRESHOLD = 0
 ANSWER_SCORE_TRESHOLD = 0
-PARQUET_FILE = "{0}.parquet"
+PARQUET_FILE = "parquet/{0}.parquet"
 MAX_LENGTH = 1000  # max length of question or answer
 
 
@@ -115,9 +116,7 @@ def convert_to_oa(all, source):
 
     del all
 
-    merged["INSTRUCTION"] = (
-        merged["Title_q"] + "\n" + merged["Body_q"].apply(to_markdown)
-    )
+    merged["INSTRUCTION"] = merged["Title_q"] + "\n" + merged["Body_q"].apply(to_markdown)
     merged["RESPONSE"] = merged["Body_a"].apply(to_markdown)
     merged["SOURCE"] = source
     merged["METADATA"] = merged.apply(create_metadata, axis=1)
@@ -142,6 +141,7 @@ def save_parquet(df, dataset):
     Save Dataframe to Parquet. See here for specs:
     https://projects.laion.ai/Open-Assistant/docs/data/datasets#creating-a-dataset-on-hugging-face
     """
+    os.makedirs("parquet", exist_ok=True)
     parquet_file = PARQUET_FILE.format(dataset)
     df.to_parquet(parquet_file, row_group_size=100, engine="pyarrow", index=False)
     print(f"Converted {len(df)} instructions into {parquet_file}")
stats.md DELETED
@@ -1,186 +0,0 @@
-- 3dprinting: 1,006
-- academia: 6,956
-- ai: 1,169
-- android: 11,591
-- anime: 3,688
-- apple: 32,603
-- arduino: 3,725
-- askubuntu: 78,472
-- astronomy: 2,425
-- aviation: 4,945
-- avp: 1,949
-- beer: 387
-- bicycles: 4,835
-- bioacoustics: 70
-- bioinformatics: 903
-- biology: 5,344
-- bitcoin: 7,456
-- blender: 25,527
-- boardgames: 4,538
-- bricks: 1,457
-- buddhism: 911
-- cardano: 670
-- chemistry: 7,430
-- chess: 2,185
-- chinese: 4,897
-- christianity: 1,248
-- civicrm: 3,221
-- codegolf: 943
-- codereview: 2,171
-- coffee: 350
-- cogsci: 645
-- computergraphics: 540
-- conlang: 101
-- cooking: 7,951
-- craftcms: 4,533
-- crafts: 438
-- crypto: 4,425
-- cs: 9,478
-- cseducators: 71
-- cstheory: 2,196
-- datascience: 5,045
-- dba: 16,850
-- devops: 961
-- diy: 14,400
-- drones: 190
-- drupal: 24,090
-- dsp: 4,470
-- earthscience: 922
-- ebooks: 323
-- economics: 2,120
-- electronics: 41,717
-- elementaryos: 1,769
-- ell: 30,428
-- emacs: 7,140
-- engineering: 2,314
-- english: 42,415
-- eosio: 626
-- es_stackoverflow: 21,475
-- esperanto: 617
-- ethereum: 9,603
-- expatriates: 973
-- expressionengine: 3,638
-- fitness: 1,833
-- freelancing: 338
-- french: 5,193
-- gamedev: 9,678
-- gaming: 44,899
-- gardening: 4,492
-- genealogy: 487
-- german: 6,715
-- gis: 30,249
-- graphicdesign: 10,563
-- ham: 790
-- hardwarerecs: 647
-- health: 804
-- hermeneutics: 782
-- hinduism: 1,036
-- history: 1,776
-- homebrew: 2,357
-- hsm: 484
-- interpersonal: 199
-- iot: 331
-- iota: 292
-- islam: 1,496
-- italian: 1,356
-- ja_stackoverflow: 9,734
-- japanese: 13,862
-- joomla: 1,875
-- judaism: 6,156
-- korean: 754
-- languagelearning: 135
-- latin: 1,387
-- law: 3,475
-- lifehacks: 934
-- linguistics: 1,507
-- literature: 582
-- magento: 20,537
-- martialarts: 364
-- materials: 338
-- math: 501,019
-- matheducators: 316
-- mathematica: 19,529
-- mathoverflow_net_7z: 23,803
-- mechanics: 4,735
-- meta: 34,161
-- meta_askubuntu: 2,076
-- meta_mathoverflow_net_7z: 333
-- meta_serverfault: 823
-- meta_stackoverflow: 12,641
-- meta_superuser: 1,748
-- moderators: 39
-- monero: 1,443
-- money: 7,996
-- movies: 6,789
-- music: 5,740
-- musicfans: 781
-- mythology: 271
-- networkengineering: 4,637
-- opendata: 1,117
-- opensource: 805
-- or: 586
-- outdoors: 1,503
-- parenting: 815
-- patents: 582
-- pets: 1,081
-- philosophy: 1,505
-- photo: 6,386
-- physics: 35,386
-- pm: 982
-- poker: 431
-- politics: 1,903
-- portuguese: 658
-- proofassistants: 87
-- pt_stackoverflow: 27,650
-- puzzling: 11,959
-- quant: 3,303
-- quantumcomputing: 1,604
-- raspberrypi: 6,794
-- retrocomputing: 1,016
-- reverseengineering: 1,606
-- robotics: 1,020
-- rpg: 9,517
-- ru_stackoverflow: 106,714
-- rus: 8,210
-- russian: 1,960
-- salesforce: 27,962
-- scicomp: 1,403
-- scifi: 15,174
-- security: 11,733
-- serverfault: 81,229
-- sharepoint: 24,934
-- sitecore: 2,691
-- skeptics: 1,043
-- softwareengineering: 10,526
-- softwarerecs: 3,032
-- solana: 602
-- sound: 2,031
-- space: 3,145
-- spanish: 3,049
-- sports: 1,715
-- sqa: 1,944
-- stackapps: 702
-- stackoverflow: 4,269,779
-- stats: 23,102
-- stellar: 373
-- substrate: 812
-- superuser: 128,488
-- sustainability: 240
-- tex: 42,808
-- tezos: 635
-- tor: 887
-- travel: 9,957
-- tridion: 1,769
-- ukrainian: 577
-- unix: 54,338
-- ux: 7,403
-- vegetarianism: 151
-- vi: 4,360
-- webapps: 10,159
-- webmasters: 9,413
-- windowsphone: 1,110
-- woodworking: 677
-- wordpress: 24,270
-- workplace: 4,104
-- worldbuilding: 2,766
-- writers: 1,957
stats.py CHANGED
@@ -1,4 +1,6 @@
-import re, glob
+import glob
+import re
+
 from pyarrow.parquet import ParquetDataset
 
 
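stats.py's body isn't shown beyond its imports; a per-file row count with ParquetDataset would look roughly like this (a sketch in the spirit of the script, not its actual code):

```python
import glob

from pyarrow.parquet import ParquetDataset

# Hypothetical row count per site, matching the format of the Statistics list.
for path in sorted(glob.glob("parquet/*.parquet")):
    rows = ParquetDataset(path).read().num_rows
    print(f"- {path}: {rows:,}")
```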
upload.py CHANGED
@@ -10,7 +10,7 @@ HF_DATASET = "donfu/oa-stackexchange"
 
 def upload_hf():
     """
-    Upload to Hugging Face
+    Upload to Hugging Face. Make sure you are logged in beforehand with `huggingface-cli login`
     """
     parquet_file = PARQUET_FILE
     dataset = load_dataset("parquet", data_files=parquet_file, name="oa-stackexchange")
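The diff cuts off after load_dataset; a minimal end-to-end sketch of upload_hf — the trailing push_to_hub call is an assumption, not shown in this diff:

```python
from datasets import load_dataset

HF_DATASET = "donfu/oa-stackexchange"
PARQUET_FILE = "stackexchange.parquet"

def upload_hf():
    """
    Upload to Hugging Face. Make sure you are logged in beforehand with `huggingface-cli login`
    """
    dataset = load_dataset("parquet", data_files=PARQUET_FILE, name="oa-stackexchange")
    dataset.push_to_hub(HF_DATASET)  # assumed final step; not shown in the diff
```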