""" Script used to tag the data with POS tags. """
import os
import re
from transformers import AutoTokenizer
import nltk, sys
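# nltk.pos_tag uses NLTK's averaged perceptron tagger; if the resource is
# missing, download it once with: nltk.download('averaged_perceptron_tagger')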
TOKENIZER_NAME = 'cambridge-climb/CamBabyTokenizer-8192'
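# Manual map from the tags produced by the unsupervised tagger to universal POS
# tags. The predicted tags are words, presumably because the anchor HMM labels
# each hidden state with its anchor word (an assumption based on the
# `./../anchor/hmm` call below).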
UNSUPERVISED_POS_TAG_MAP = {
    "and": 'CONJ',
    "|": 'NOUN',
    "states": 'NOUN',
    "school": 'NOUN',
    ".\"": '.',
    "-": '.',
    "five": 'NUM',
    "1": 'NUM',
    "they": 'PRON',
    "of": 'ADP',
    "are": 'VERB',
    "(": '.',
    "american": 'ADJ',
    "'s": 'VERB',
    "\"": 'NOUN',
    "the": 'DET',
    "a": 'DET',
    "after": 'ADP',
    "th": 'NOUN',
    "good": 'ADJ',
    "her": 'PRON',
    "night": 'NOUN',
    "to": 'PRT',
    "used": 'VERB',
    ",": '.',
    "sir": 'NOUN',
    "tell": 'VERB',
    "lot": 'NOUN',
    "amp": 'NOUN',
    "doing": 'VERB'
}

def tag_with_nltk(text, en_ptb_map):
    """ Given a list of lines, tag each word with its POS tag using NLTK """
    new_lines = []
    for line in text:
        tokens = line.split()
        tagged = nltk.pos_tag(tokens)
        # Map the NLTK PTB tags to the universal tags
        tagged = [(token, en_ptb_map[tag]) for (token, tag) in tagged]
        new_lines.append(tagged)
    return new_lines

def write_to_file(tagged, output_file):
    """ Given a list of tagged lines, write them to the given output file,
    one sentence per line, with each token written as 'token__<label>__TAG' """
    with open(output_file, 'w') as f:
        for line in tagged:
            for token, tag in line:
                f.write(f'{token}__<label>__{tag} ')
            f.write('\n')

def tokenize_lines(text, tokenizer):
    """ Pre-tokenize each line with the tokenizer's pre-tokenizer and re-join
    the pieces with spaces. 'Ġ' and 'Ċ' are the byte-level BPE markers for a
    leading space and a newline, respectively. """
    new_lines = []
    for line in text:
        tokens = tokenizer.backend_tokenizer.pre_tokenizer.pre_tokenize_str(line)
        tokens = [t[0].replace("Ġ", "").replace('Ċ', '\n') for t in tokens]
        new_lines.append(' '.join(tokens))
    return new_lines

def get_tags_from_file(file):
    """ Read the unsupervised tagger's output and return the gold-tagged and
    predicted-tagged lines, printing the tagging accuracy along the way.
    Sentences are separated by blank lines; each non-blank line is expected
    to hold four space-separated fields: token, gold tag, a separator, and
    the predicted tag. """
    with open(file, 'r') as f:
        lines = f.readlines()
    gold_tagged_lines = []
    pred_tagged_lines = []
    gold_tagged = []
    pred_tagged = []
    total = 0
    correct = 0
    for line in lines:
        line = line.strip()
        if line == '':
            gold_tagged_lines.append(gold_tagged)
            pred_tagged_lines.append(pred_tagged)
            gold_tagged = []
            pred_tagged = []
        else:
            token, gold_tag, _, pred_tag = line.split(' ')
            gold_tagged.append((token, gold_tag))
            # Use the manual map to map the predicted tags to the universal tags
            pred_tagged.append((token, UNSUPERVISED_POS_TAG_MAP[pred_tag]))
            total += 1
            if gold_tag == UNSUPERVISED_POS_TAG_MAP[pred_tag]:
                correct += 1
    print(f' Unsupervised Tagging Accuracy: {correct/total:.4f}')
    return gold_tagged_lines, pred_tagged_lines

def write_tagged_lines(filename, text, tagged_lines):
    """ Write each original line followed by its tagged version to the given file """
    with open(filename, 'w') as f:
        # Write the filename as the first line
        f.write(filename.split('/')[-1] + '\n')
        for line, tagged in zip(text, tagged_lines):
            f.write(line)
            f.write(' '.join([f'{token}__<label>__{tag}' for token, tag in tagged]) + '\n')

FOLDERS = ['10M', '100M', 'dev', 'test']  # BabyLM data splits to process
SECTION = "original"                      # Directory holding the raw text files
RUN_UNSUPERVISED_TAGGER = True            # If False, only write the gold (NLTK) tags
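# Input files are expected under '{SECTION}/{folder}/**/*.txt'; the tagged
# output mirrors that tree under '{SECTION}_tagged' and '{SECTION}_tagged_gold'.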
if __name__ == "__main__":
tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_NAME)
# Read all text files from directory "BabyLM"
all_files = []
for folder in FOLDERS:
for root, dirs, files in os.walk(f"{SECTION}/{folder}"):
for file in files:
if file.endswith(".txt"):
all_files.append(os.path.join(root, file))
# Get map from PTB tags to universal tags
en_ptb_map = {}
with open('en-ptb.map', 'r') as f:
for line in f.readlines():
(key, val) = line.split()
en_ptb_map[key] = val
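    # 'en-ptb.map' appears to be the Petrov et al. universal tagset mapping:
    # one '<PTB tag> <universal tag>' pair per line, e.g. 'NNS NOUN'.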

    for file in all_files:
        print(file)
        with open(file, 'r') as f:
            # Skip the first line (assumed to be a header) and drop blank lines
            lines = f.readlines()[1:]
        lines = [line.strip() + '\n' for line in lines if line.strip() != '']

        tagged_file = file.replace(SECTION, f'{SECTION}_tagged')
        gold_tagged_file = file.replace(SECTION, f'{SECTION}_tagged_gold')

        # 1. Tokenize the lines in the text and tag them with universal tags
        tokenized = tokenize_lines(lines, tokenizer)
        tagged = tag_with_nltk(tokenized, en_ptb_map)
        if not RUN_UNSUPERVISED_TAGGER:
            # Only save the gold (NLTK) tags
            os.makedirs(os.path.dirname(gold_tagged_file), exist_ok=True)
            write_tagged_lines(gold_tagged_file, lines, tagged)
            continue
        # 2. Run the unsupervised tagger on the tmp file
        write_to_file(tagged, 'tmp.txt')
        os.system('./../anchor/hmm --output ../pos_tagging/10M_train_30_extended --data tmp.txt --pred tmp_tagged.txt')
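        # The `hmm` binary is assumed to be the anchor-word HMM tagger, with
        # `--output` pointing at a previously trained model; adjust both paths
        # to your own setup.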
        # 3. Get the gold tags and predicted tags
        gold_tagged_lines, pred_tagged_lines = get_tags_from_file('tmp_tagged.txt')
        os.remove('tmp.txt')
        os.remove('tmp_tagged.txt')
        assert len(gold_tagged_lines) == len(pred_tagged_lines) == len(lines)

        # 4. Write the tagged lines to the mirrored output files
        os.makedirs(os.path.dirname(tagged_file), exist_ok=True)
        write_tagged_lines(tagged_file, lines, pred_tagged_lines)
        os.makedirs(os.path.dirname(gold_tagged_file), exist_ok=True)
        write_tagged_lines(gold_tagged_file, lines, gold_tagged_lines)