import os
import glob
import codecs

import spacy


def replace_unicode_whitespaces_with_ascii_whitespace(string):
    # Collapse every run of Unicode whitespace (tabs, newlines, non-breaking
    # spaces, etc.) into a single ASCII space.
    return ' '.join(string.split())


def get_start_and_end_offset_of_token_from_spacy(token):
    # Character offsets of the token within the original text.
    start = token.idx
    end = start + len(token)
    return start, end


def get_sentences_and_tokens_from_spacy(text, spacy_nlp):
    document = spacy_nlp(text)

    sentences = []
    for span in document.sents:
        sentence = [document[i] for i in range(span.start, span.end)]
        sentence_tokens = []
        for token in sentence:
            token_dict = {}
            token_dict['start'], token_dict['end'] = get_start_and_end_offset_of_token_from_spacy(token)
            token_dict['text'] = text[token_dict['start']:token_dict['end']]
            # Skip tokens that consist only of whitespace.
            if token_dict['text'].strip() == '':
                continue

            # CoNLL uses the space character as the column separator, so a
            # token cannot itself contain a space.
            if len(token_dict['text'].split(' ')) != 1:
                print("WARNING: the token text contains a space character; replacing it with a hyphen:\n\t{0}\n\t{1}".format(
                    token_dict['text'], token_dict['text'].replace(' ', '-')))
                token_dict['text'] = token_dict['text'].replace(' ', '-')
            sentence_tokens.append(token_dict)
        sentences.append(sentence_tokens)
    return sentences
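
# Illustrative sketch of the structure returned above (hypothetical values,
# not from a real run): each sentence is a list of token dicts carrying
# character offsets into the original text, e.g.
#
#   get_sentences_and_tokens_from_spacy("Barack Obama was born.", spacy_nlp)
#   -> [[{'start': 0,  'end': 6,  'text': 'Barack'},
#        {'start': 7,  'end': 12, 'text': 'Obama'},
#        {'start': 13, 'end': 16, 'text': 'was'},
#        {'start': 17, 'end': 21, 'text': 'born'},
#        {'start': 21, 'end': 22, 'text': '.'}]]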


def get_entities_from_brat(text_filepath, annotation_filepath, verbose=False):
    # Load the raw text that the brat offsets refer to.
    with codecs.open(text_filepath, 'r', 'UTF-8') as f:
        text = f.read()
    if verbose:
        print("\ntext:\n{0}\n".format(text))

    # Accent stripping is disabled; offsets are checked against the text as-is.
    # text2 = ''
    # for word in text:
    #     text2 += elimina_tildes(word)
    text2 = text

    # Parse the annotation file: each text-bound annotation line has the form
    # "T<id> <type> <start> <end> <entity text>".
    entities = []
    with codecs.open(annotation_filepath, 'r', 'UTF-8') as f:
        for line in f.read().splitlines():
            anno = line.split()
            if not anno:
                # Skip empty lines.
                continue
            id_anno = anno[0]

            # Only text-bound annotations (identifiers starting with 'T') are kept.
            if id_anno[0] == 'T':
                entity = {}
                entity['id'] = id_anno
                entity['type'] = anno[1]
                entity['start'] = int(anno[2])
                entity['end'] = int(anno[3])
                entity['text'] = ' '.join(anno[4:])
                if verbose:
                    print("entity: {0}".format(entity))

                # Verify that the annotated text matches the span at the given offsets.
                if replace_unicode_whitespaces_with_ascii_whitespace(text2[entity['start']:entity['end']]) != \
                   replace_unicode_whitespaces_with_ascii_whitespace(entity['text']):
                    print("Warning: brat text and annotation do not match.")
                    print("\ttext: {0}".format(text2[entity['start']:entity['end']]))
                    print("\tanno: {0}".format(entity['text']))

                entities.append(entity)
    if verbose:
        print("\n\n")

    return text2, entities
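
# Illustrative example (hypothetical annotation, not from the source data):
# a brat line "T1\tPerson 0 12\tBarack Obama" in the .ann file yields
# {'id': 'T1', 'type': 'Person', 'start': 0, 'end': 12, 'text': 'Barack Obama'}.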


def check_brat_annotation_and_text_compatibility(brat_folder):
    '''
    Check whether the brat annotation and text files are compatible.
    '''
    dataset_type = os.path.basename(brat_folder)
    print("Checking the validity of BRAT-formatted {0} set... ".format(dataset_type), end='')
    text_filepaths = sorted(glob.glob(os.path.join(brat_folder, '*.txt')))
    for text_filepath in text_filepaths:
        base_filename = os.path.splitext(os.path.basename(text_filepath))[0]
        annotation_filepath = os.path.join(os.path.dirname(text_filepath), base_filename + '.ann')

        if not os.path.exists(annotation_filepath):
            raise IOError("Annotation file does not exist: {0}".format(annotation_filepath))
        # Loading the entities prints a warning for every mismatching span.
        text, entities = get_entities_from_brat(text_filepath, annotation_filepath)
    print("Done.")


def brat_to_conll(input_folder, output_filepath, language):
    '''
    Assumes that the '.txt' and '.ann' files are in input_folder, and checks
    the compatibility between the .txt and .ann files at the same time.
    Writes one token per line to output_filepath in CoNLL format:
    "<token> <document id> <start> <end> <BIO label>", with an empty line
    between sentences.
    '''
    spacy_nlp = spacy.load(language)

    verbose = False
    dataset_type = os.path.basename(input_folder)
    print("Formatting {0} set from BRAT to CONLL... ".format(dataset_type), end='')
    text_filepaths = sorted(glob.glob(os.path.join(input_folder, '*.txt')))
    output_file = codecs.open(output_filepath, 'w', 'utf-8')
    for text_filepath in text_filepaths:
        base_filename = os.path.splitext(os.path.basename(text_filepath))[0]
        annotation_filepath = os.path.join(os.path.dirname(text_filepath), base_filename + '.ann')

        # Create an empty annotation file if none exists, so that texts
        # without annotations still yield all-'O' tokens.
        if not os.path.exists(annotation_filepath):
            codecs.open(annotation_filepath, 'w', 'UTF-8').close()

        text, entities = get_entities_from_brat(text_filepath, annotation_filepath)
        entities = sorted(entities, key=lambda entity: entity["start"])

        sentences = get_sentences_and_tokens_from_spacy(text, spacy_nlp)

        for sentence in sentences:
            inside = False
            previous_token_label = 'O'
            for token in sentence:
                token['label'] = 'O'
                for entity in entities:
                    # The token belongs to the entity if their spans overlap.
                    if entity['start'] <= token['start'] < entity['end'] or \
                       entity['start'] < token['end'] <= entity['end'] or \
                       token['start'] < entity['start'] < entity['end'] < token['end']:
                        # Hyphens in entity types would clash with the B-/I- prefixes.
                        token['label'] = entity['type'].replace('-', '_')
                        break
                    elif token['end'] < entity['start']:
                        # Entities are sorted by start offset, so no later
                        # entity can overlap this token either.
                        break

                if len(entities) == 0:
                    # Dummy entity so the end-of-entity check below is defined.
                    entity = {'end': 0}
                if token['label'] == 'O':
                    gold_label = 'O'
                    inside = False
                elif inside and token['label'] == previous_token_label:
                    gold_label = 'I-{0}'.format(token['label'])
                else:
                    inside = True
                    gold_label = 'B-{0}'.format(token['label'])
                if token['end'] == entity['end']:
                    # The token closes the current entity.
                    inside = False
                previous_token_label = token['label']
                if verbose:
                    print('{0} {1} {2} {3} {4}\n'.format(token['text'], base_filename, token['start'], token['end'], gold_label))
                output_file.write('{0} {1} {2} {3} {4}\n'.format(token['text'], base_filename, token['start'], token['end'], gold_label))
            if verbose:
                print('\n')
            output_file.write('\n')

    output_file.close()
    print('Done.')
    del spacy_nlp
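

# Minimal usage sketch (assumed folder layout and language argument; adjust to
# your setup). Older spaCy versions accept a shortcut such as 'en' here, while
# newer ones require a full model name like 'en_core_web_sm'.
if __name__ == '__main__':
    check_brat_annotation_and_text_compatibility('data/train')
    brat_to_conll('data/train', 'data/train.conll', 'en')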