codebyzeb committed on
Commit
4d0612f
β€’
1 Parent(s): a03a6bc
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50) hide show
  1. 100M/aochildes.txt +0 -3
  2. 100M/bnc_spoken.txt +0 -3
  3. 100M/cbt.txt +0 -3
  4. 100M/children_stories.txt +0 -3
  5. 100M/gutenberg.txt +0 -3
  6. 100M/open_subtitles.txt +0 -3
  7. 100M/qed.txt +0 -3
  8. 100M/simple_wikipedia.txt +0 -3
  9. 100M/wikipedia.txt +0 -3
  10. 10M/open_subtitles.txt +0 -3
  11. BabyLM.py +3 -3
  12. clean/100M/switchboard.txt +0 -0
  13. clean/10M/aochildes.txt +0 -0
  14. clean/10M/bnc_spoken.txt +0 -0
  15. clean/10M/cbt.txt +0 -0
  16. clean/10M/children_stories.txt +0 -0
  17. clean/10M/gutenberg.txt +0 -0
  18. clean/10M/qed.txt +0 -0
  19. clean/10M/simple_wikipedia.txt +0 -0
  20. clean/10M/switchboard.txt +0 -0
  21. clean/10M/wikipedia.txt +0 -0
  22. clean/dev/aochildes.txt +0 -0
  23. clean/dev/bnc_spoken.txt +0 -0
  24. clean/dev/cbt.txt +0 -0
  25. clean/dev/children_stories.txt +0 -0
  26. clean/dev/gutenberg.txt +0 -0
  27. clean/dev/qed.txt +0 -0
  28. clean/dev/simple_wikipedia.txt +0 -0
  29. clean/dev/switchboard.txt +0 -0
  30. clean/dev/wikipedia.txt +0 -0
  31. clean/test/aochildes.txt +0 -0
  32. clean/test/bnc_spoken.txt +0 -0
  33. clean/test/cbt.txt +0 -0
  34. clean/test/children_stories.txt +0 -0
  35. clean/test/gutenberg.txt +0 -0
  36. clean/test/qed.txt +0 -0
  37. clean/test/switchboard.txt +0 -0
  38. clean/test/wikipedia.txt +0 -0
  39. clean_data.py +230 -0
  40. dev/open_subtitles.txt +0 -3
  41. {100M → original/100M}/switchboard.txt +0 -0
  42. {10M → original/10M}/aochildes.txt +0 -0
  43. {10M → original/10M}/bnc_spoken.txt +0 -0
  44. {10M → original/10M}/cbt.txt +0 -0
  45. {10M → original/10M}/children_stories.txt +0 -0
  46. {10M → original/10M}/gutenberg.txt +0 -0
  47. {10M → original/10M}/qed.txt +0 -0
  48. {10M → original/10M}/simple_wikipedia.txt +0 -0
  49. {10M → original/10M}/switchboard.txt +0 -0
  50. {10M → original/10M}/wikipedia.txt +0 -0
100M/aochildes.txt DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:4a9ade69911e4d7351e5d1427c2e415a61f63357068d88631cac56502e0f4654
3
- size 18211974
 
 
 
 
100M/bnc_spoken.txt DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:42c1a30d5923d3b86272d553bcdc4f989a0b5e58c649249665080bd749916ccf
3
- size 42472678
 
 
 
 
100M/cbt.txt DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:13cfc7743cea6752b5f22fa49c8977292428e176b11963d3906f10528e491401
3
- size 25742364
 
 
 
 
100M/children_stories.txt DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:e0cd311ad21fa042f021a680820f60fd9599f4bc394f3b6e33fc7e20073247fd
3
- size 17447597
 
 
 
 
100M/gutenberg.txt DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:ece0df262b6d10472dea4be608974a21cb11520844e79128544ac41b0288384f
3
- size 54796262
 
 
 
 
100M/open_subtitles.txt DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:758897a04d43c584f4969fabfcfd80aa3e37df1cbfa843719326bd2b0db6c756
3
- size 166489947
 
 
 
 
100M/qed.txt DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:ffa520f03fedbd5f6fd36babfeecc68061a6c455f5f708712ed572a404118609
3
- size 56962048
 
 
 
 
100M/simple_wikipedia.txt DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:f94bfa6f1f15e4b7fcac833133fbaa5c34565f7a18c6f36ab89f9d68391b1ca4
3
- size 87972498
 
 
 
 
100M/wikipedia.txt DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:e43a7fd062cf5ead3f50b2c4b9662cbed0e9afe1ddb2e354fbfd2f1941bc2cef
3
- size 61643655
 
 
 
 
10M/open_subtitles.txt DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:8ce0277990b9977ba3242b865fdd496e7fb94fa9531674a2721f9a4c11abbc3a
3
- size 16450052
 
 
 
 
BabyLM.py CHANGED
@@ -65,9 +65,9 @@ class BabyLM(datasets.GeneratorBasedBuilder):
65
  train_data_dir = "100M"
66
 
67
  urls_to_download = {
68
- "train": [f"{train_data_dir}/{fn}" for fn in filenames],
69
- "dev": [f"dev/{fn}" for fn in filenames],
70
- "test": [f"test/{fn}" for fn in filenames]
71
  }
72
 
73
  downloaded_files = dl_manager.download_and_extract(urls_to_download)
 
65
  train_data_dir = "100M"
66
 
67
  urls_to_download = {
68
+ "train": [f"clean/{train_data_dir}/{fn}" for fn in filenames],
69
+ "dev": [f"clean/dev/{fn}" for fn in filenames],
70
+ "test": [f"clean/test/{fn}" for fn in filenames]
71
  }
72
 
73
  downloaded_files = dl_manager.download_and_extract(urls_to_download)
clean/100M/switchboard.txt ADDED
The diff for this file is too large to render. See raw diff
 
clean/10M/aochildes.txt ADDED
The diff for this file is too large to render. See raw diff
 
clean/10M/bnc_spoken.txt ADDED
The diff for this file is too large to render. See raw diff
 
clean/10M/cbt.txt ADDED
The diff for this file is too large to render. See raw diff
 
clean/10M/children_stories.txt ADDED
The diff for this file is too large to render. See raw diff
 
clean/10M/gutenberg.txt ADDED
The diff for this file is too large to render. See raw diff
 
clean/10M/qed.txt ADDED
The diff for this file is too large to render. See raw diff
 
clean/10M/simple_wikipedia.txt ADDED
The diff for this file is too large to render. See raw diff
 
clean/10M/switchboard.txt ADDED
The diff for this file is too large to render. See raw diff
 
clean/10M/wikipedia.txt ADDED
The diff for this file is too large to render. See raw diff
 
clean/dev/aochildes.txt ADDED
The diff for this file is too large to render. See raw diff
 
clean/dev/bnc_spoken.txt ADDED
The diff for this file is too large to render. See raw diff
 
clean/dev/cbt.txt ADDED
The diff for this file is too large to render. See raw diff
 
clean/dev/children_stories.txt ADDED
The diff for this file is too large to render. See raw diff
 
clean/dev/gutenberg.txt ADDED
The diff for this file is too large to render. See raw diff
 
clean/dev/qed.txt ADDED
The diff for this file is too large to render. See raw diff
 
clean/dev/simple_wikipedia.txt ADDED
The diff for this file is too large to render. See raw diff
 
clean/dev/switchboard.txt ADDED
The diff for this file is too large to render. See raw diff
 
clean/dev/wikipedia.txt ADDED
The diff for this file is too large to render. See raw diff
 
clean/test/aochildes.txt ADDED
The diff for this file is too large to render. See raw diff
 
clean/test/bnc_spoken.txt ADDED
The diff for this file is too large to render. See raw diff
 
clean/test/cbt.txt ADDED
The diff for this file is too large to render. See raw diff
 
clean/test/children_stories.txt ADDED
The diff for this file is too large to render. See raw diff
 
clean/test/gutenberg.txt ADDED
The diff for this file is too large to render. See raw diff
 
clean/test/qed.txt ADDED
The diff for this file is too large to render. See raw diff
 
clean/test/switchboard.txt ADDED
The diff for this file is too large to render. See raw diff
 
clean/test/wikipedia.txt ADDED
The diff for this file is too large to render. See raw diff
 
clean_data.py ADDED
@@ -0,0 +1,230 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Script used to clean the data. """
2
+
3
+ import os
4
+ import re
5
+ from nltk import tokenize
6
+
7
def clean_aochildes(lines):
    """ For aochildes, we just remove the space between the punctuation mark and the final word """
    # Each line ends "... word PUNCT\n"; dropping the character three from the
    # end removes the space while keeping the punctuation mark and newline.
    return [line[:-3] + line[-2:] for line in lines]
13
+
14
def clean_bnc_spoken(lines):
    """ For bnc_spoken, we lowercase """
    # No other normalisation is applied to this corpus.
    return [line.lower() for line in lines]
20
+
21
def clean_cbt(lines):
    """ For cbt, we lowercase and normalise punctuation """
    # Punctuation marks that should not be preceded by a space (the source
    # text is pre-tokenised, e.g. "word ."). NOTE(review): the last four
    # entries appear to be curly quotes/dashes — confirm encoding matches
    # the corpus files.
    punctuation = ['.', ',', '?', '!', ':', ';', '(', ')', '[', ']', '{', '}', '"', "'", 'β€œ', '”', 'β€”', '–']
    new_lines = []
    for line in lines:
        new_line = line.lower()
        # Normalise the various quote conventions (Penn-Treebank style
        # `` ... '' and stray backticks) to plain double quotes.
        # The order of these replaces matters: longer/more specific
        # patterns must run before the shorter ones they contain.
        new_line = new_line.replace(": ' ", ": \"")
        new_line = new_line.replace("''", "\"")
        new_line = new_line.replace(" '\n", "\"\n")
        new_line = new_line.replace(" ' ", "\" ")
        new_line = new_line.replace(" `` ", " \"")
        new_line = new_line.replace("` ", " \"")
        new_line = new_line.replace("`", "\"")
        new_line = new_line.replace("’", "\"")
        # Attach each punctuation mark to the preceding word.
        for punct in punctuation:
            new_line = new_line.replace(f" {punct}", punct)
        new_lines.append(new_line)
    return new_lines
39
+
40
def clean_children_stories(lines):
    """ For children_stories, we lowercase and split long lines into sentences """
    new_lines = []
    for raw in lines:
        # NLTK handles the sentence segmentation; each sentence becomes
        # its own newline-terminated output line.
        for sentence in tokenize.sent_tokenize(raw.lower().strip()):
            if sentence != '':
                new_lines.append(sentence + '\n')
    return new_lines
47
+
48
def clean_gutenberg(lines):
    """ For gutenberg, we lowercase, remove italics, group lines into paragraphs and then split into sentences """
    # Group consecutive non-blank lines into paragraphs.
    paragraphs = []
    paragraph = ""
    for line in lines:
        # Underscores mark italics in Gutenberg plain text; strip them.
        tmp_line = line.lower().strip().replace('_', '')
        if tmp_line == "" and paragraph != "":
            # Blank line terminates the paragraph; drop the trailing space.
            paragraphs.append(paragraph[:-1] + '\n')
            paragraph = ""
        else:
            paragraph += tmp_line + " "
    # Bug fix: flush the final paragraph, which was silently dropped when
    # the file did not end with a blank line.
    if paragraph.strip() != "":
        paragraphs.append(paragraph[:-1] + '\n')

    # Split each paragraph into sentences using NLTK.
    new_lines = []
    for paragraph in paragraphs:
        sentences = [s + '\n' for s in tokenize.sent_tokenize(paragraph) if s != '']
        new_lines.extend(sentences)
    return new_lines
68
+
69
def clean_open_subtitles(lines):
    """ For open_subtitles, we lowercase, remove subtitle dashes and fix the lowercase 'l' problem """
    # Characters that may directly follow a word; used to anchor the
    # whole-word 'l' -> 'i' fixes below. NOTE(review): several entries look
    # like curly quotes/dashes — confirm encoding matches the corpus files.
    punctuation = ['.', ',', '?', '!', ':', ';', '(', ')', '[', ']', '{', '}', '"', "'", 'β€œ', '”', 'β€”', '–', ' ', '\n']
    new_lines = []
    for line in lines:
        new_line = line.lower()
        # Strip leading subtitle dialogue dashes ("- " or "-").
        if new_line[0:2] == "- ":
            new_line = new_line[2:]
        if new_line[0] == "-":
            new_line = new_line[1:]
        # Prepend a space so the " l..." patterns also match at line start.
        new_line = ' ' + new_line
        # Fix standalone "l"/"lm"/"lf" that are OCR-style confusions of
        # "i"/"im"/"if" (presumably from subtitle rips — verify).
        for punct in punctuation:
            new_line = new_line.replace(f" l{punct}", f" i{punct}")
            new_line = new_line.replace(f" lm{punct}", f" im{punct}")
            new_line = new_line.replace(f" lf{punct}", f" if{punct}")
        # Fix word-initial "l" before consonants where "i" was intended
        # (e.g. " lt's" -> " it's").
        new_line = new_line.replace(' lc', ' ic')
        new_line = new_line.replace(' ld', ' id')
        new_line = new_line.replace(' lj', ' i j')
        new_line = new_line.replace(' ln', ' in')
        new_line = new_line.replace(' lp', ' ip')
        new_line = new_line.replace(' lr', ' ir')
        new_line = new_line.replace(' ls', ' is')
        # Undo the previous replace for the genuine word "lsd".
        new_line = new_line.replace(' isd', ' lsd')
        new_line = new_line.replace(' lt', ' it')
        # (Duplicate of the previous line; harmless no-op kept as-is.)
        new_line = new_line.replace(' lt', ' it')
        new_line = new_line.replace(' lv', ' iv')
        # Drop the sentinel space added above and re-terminate the line.
        new_lines.append(new_line.strip() + '\n')
    return new_lines
97
+
98
def clean_qed(lines):
    """ For qed, we lowercase and normalise punctuation, remove words contained in parentheses,
    remove lines that are just character's names and fix the lowercase 'l' problem """

    new_lines = []
    for line in lines:
        # Before lowercasing, fix all-caps words that use 'l' where 'I' was
        # intended (e.g. "l'M" -> "I'M"); "I'll" is a legitimate exception.
        words = line.split()
        for i, word in enumerate(words):
            if word.replace('l', 'I').isupper() and 'l' in word and word != 'I\'ll':
                words[i] = word.replace('l', 'I')
        new_line = ' '.join(words).lower()
        # Fix word-initial "l" before consonants where "i" was intended.
        new_line = new_line.replace(' lc', ' ic')
        new_line = new_line.replace(' ld', ' id')
        new_line = new_line.replace(' lj', ' i j')
        new_line = new_line.replace(' ln', ' in')
        new_line = new_line.replace(' lp', ' ip')
        new_line = new_line.replace(' lr', ' ir')
        new_line = new_line.replace(' ls', ' is')
        # Undo the previous replace for the genuine word "lsd".
        new_line = new_line.replace(' isd', ' lsd')
        new_line = new_line.replace(' lt', ' it')
        new_line = new_line.replace(' lv', ' iv')

        # Skip empty lines and lines that are just character names, e.g. "AMY GOODMAN:"
        if len(new_line.strip()) < 1 or (len(words) <= 3 and new_line.strip()[-1] == ':'):
            continue

        # Remove subtitle dashes
        if new_line[0:2] == "- ":
            new_line = new_line[2:]
        if new_line[0] == "-":
            new_line = new_line[1:]

        # Remove substrings contained within circular or square parentheses
        # (screen descriptions).
        new_line = re.sub(r'\([^)]*\)', '', new_line)
        # Bug fix: the bracket pattern previously excluded ')' instead of ']'
        # (r'\[[^)]*\]'), so bracketed spans containing ')' were never removed.
        new_line = re.sub(r'\[[^\]]*\]', '', new_line)
        new_line = new_line.replace('"', '\'')

        # Remove strange characters
        new_line = new_line.replace('#', '')
        new_line = new_line.replace('*', '')

        new_line = new_line.strip()
        if new_line != "":
            new_lines.append(new_line + '\n')
    return new_lines
147
+
148
def clean_simple_wikipedia(lines):
    """ For simple_wikipedia, we lowercase, remove empty lines and article names and split paragraphs into sentences."""
    new_lines = []
    skip_next = False  # the line following a blank line is an article title
    for line in lines:
        if skip_next:
            skip_next = False
            continue
        if line.strip() == "":
            skip_next = True
            continue
        for sentence in tokenize.sent_tokenize(line.lower()):
            if sentence != '':
                new_lines.append(sentence + '\n')
    return new_lines
162
+
163
def clean_switchboard(lines):
    """ For switchboard, we lowercase """
    # No other normalisation is applied to this corpus.
    return [line.lower() for line in lines]
170
+
171
def clean_wikipedia(lines):
    """ For wikipedia, we lowercase, remove empty lines and article names and split paragraphs into sentences.
    We also remove lines that seem to be figure names or table entries. """
    new_lines = []
    for raw in lines:
        stripped = raw.strip()

        # Drop empty lines and "= Heading =" article/section titles.
        if stripped == "":
            continue
        if stripped[0] == "=" and stripped[-1] == "=":
            continue

        # Drop lines that look like table entries (all numbers) or figure
        # names (every word capitalised).
        words = stripped.split()
        if all(w.isnumeric() for w in words):
            continue
        if all(w[0].isupper() for w in words):
            continue

        # Split into sentences using NLTK.
        for sentence in tokenize.sent_tokenize(stripped.lower()):
            if sentence != '':
                new_lines.append(sentence + '\n')
    return new_lines
200
+
201
# Map each corpus name (the .txt basename) to its cleaning function.
CLEAN_FUNCTIONS = {
    'aochildes': clean_aochildes,
    'bnc_spoken': clean_bnc_spoken,
    'cbt': clean_cbt,
    'children_stories': clean_children_stories,
    'gutenberg': clean_gutenberg,
    'open_subtitles': clean_open_subtitles,
    'qed': clean_qed,
    'simple_wikipedia': clean_simple_wikipedia,
    'switchboard': clean_switchboard,
    'wikipedia': clean_wikipedia,
}
# Dataset split folders to process under original/.
FOLDERS = ['10M', '100M', 'dev', 'test']
203
+
204
if __name__ == "__main__":

    # Collect every .txt file under the original/ split folders.
    all_files = []
    for folder in FOLDERS:
        for root, dirs, files in os.walk(f"original/{folder}"):
            for file in files:
                if file.endswith(".txt"):
                    all_files.append(os.path.join(root, file))

    for file in all_files:
        print(file)
        with open(file, 'r') as f:
            lines = f.readlines()

        # Corpus name is the basename without extension,
        # e.g. "original/10M/qed.txt" -> "qed".
        corpus_name = os.path.basename(file).split('.')[0]

        # Clean the data. Bug fix: use .get() so an unexpected corpus file
        # is copied through unchanged instead of raising KeyError (the
        # original `is not None` guard could never fire with a [] lookup).
        clean_function = CLEAN_FUNCTIONS.get(corpus_name)
        if clean_function is not None:
            lines = clean_function(lines)

        # Write the cleaned copy under clean/, mirroring the folder layout.
        new_file = file.replace('original', 'clean')
        os.makedirs(os.path.dirname(new_file), exist_ok=True)
        with open(new_file, 'w') as f:
            f.writelines(lines)
dev/open_subtitles.txt DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:72ee06e6fcbdb01b845c465be7da4089f373ac96bc3e3216e139ccca7fe92369
3
- size 15745512
 
 
 
 
{100M → original/100M}/switchboard.txt RENAMED
File without changes
{10M → original/10M}/aochildes.txt RENAMED
File without changes
{10M → original/10M}/bnc_spoken.txt RENAMED
File without changes
{10M → original/10M}/cbt.txt RENAMED
File without changes
{10M → original/10M}/children_stories.txt RENAMED
File without changes
{10M → original/10M}/gutenberg.txt RENAMED
File without changes
{10M → original/10M}/qed.txt RENAMED
File without changes
{10M → original/10M}/simple_wikipedia.txt RENAMED
File without changes
{10M → original/10M}/switchboard.txt RENAMED
File without changes
{10M → original/10M}/wikipedia.txt RENAMED
File without changes