|
|
|
|
|
|
|
import fileinput |
|
import sys |
|
import io |
|
import os.path |
|
import argparse |
|
import logging |
|
import traceback |
|
|
|
|
|
|
|
|
|
import regex as re |
|
import unicodedata |
|
|
|
|
|
# Absolute, symlink-resolved directory containing this script; presumably used to locate bundled data files — TODO confirm at use sites.
THISDIR=os.path.dirname(os.path.realpath(os.path.abspath(__file__)))
|
|
|
|
|
# A CoNLL-U token line has exactly 10 tab-separated columns.
COLCOUNT=10
# Indices 0..9 of the individual columns.
ID,FORM,LEMMA,UPOS,XPOS,FEATS,HEAD,DEPREL,DEPS,MISC=range(COLCOUNT)
# Human-readable column names, indexed the same way, for error messages.
COLNAMES='ID,FORM,LEMMA,UPOS,XPOS,FEATS,HEAD,DEPREL,DEPS,MISC'.split(',')
# Pseudo-column index one past MISC; presumably keys an extra tag set entry for tokens-with-space — TODO confirm at use sites.
TOKENSWSPACE=MISC+1
|
|
|
|
|
# Global state shared by the validation functions, updated while input is read.
curr_line=0 # 1-based number of the line last read from input
sentence_line=0 # input line on which the current sentence starts
sentence_id=None # sent_id of the current sentence, if one was seen
line_of_first_empty_node=None
line_of_first_enhanced_orphan=None

# Per-error-type occurrence counts; used by warn() for --max-err suppression.
error_counter={}
# Names of tag-set data files that were unavailable; presumably reported at the end of the run — TODO confirm.
warn_on_missing_files=set()
|
def warn(msg, error_type, testlevel=0, testid='some-test', lineno=True, nodelineno=0, nodeid=0):
    """
    Print the warning.
    If lineno is True, print the number of the line last read from input. Note
    that once we have read a sentence, this is the number of the empty line
    after the sentence, hence we probably do not want to print it.
    If we still have an error that pertains to an individual node, and we know
    the number of the line where the node appears, we can supply it via
    nodelineno. Nonzero nodelineno means that lineno value is ignored.
    If lineno is False, print the number and starting line of the current tree.
    """
    global curr_fname, curr_line, sentence_line, sentence_id, error_counter, tree_counter, args
    # Count every error, even when its printout is suppressed below.
    error_counter[error_type] = error_counter.get(error_type, 0)+1
    if not args.quiet:
        if args.max_err>0 and error_counter[error_type]==args.max_err:
            # Exactly at the limit: announce the suppression once per error type.
            print(('...suppressing further errors regarding ' + error_type), file=sys.stderr)
        elif args.max_err>0 and error_counter[error_type]>args.max_err:
            pass # beyond the limit: suppress silently
        else:
            # Prefix with the file name only when validating more than one input file.
            if len(args.input)>1:
                if curr_fname=="-":
                    fn="(in STDIN) "
                else:
                    fn="(in "+os.path.basename(curr_fname)+") "
            else:
                fn=""
            sent = ''
            node = ''
            # Optional sentence/node context for the message.
            if sentence_id:
                sent = ' Sent ' + sentence_id
            if nodeid:
                node = ' Node ' + str(nodeid)
            if nodelineno:
                print("[%sLine %d%s%s]: [L%d %s %s] %s" % (fn, nodelineno, sent, node, testlevel, error_type, testid, msg), file=sys.stderr)
            elif lineno:
                print("[%sLine %d%s%s]: [L%d %s %s] %s" % (fn, curr_line, sent, node, testlevel, error_type, testid, msg), file=sys.stderr)
            else:
                print("[%sTree number %d on line %d%s%s]: [L%d %s %s] %s" % (fn, tree_counter, sentence_line, sent, node, testlevel, error_type, testid, msg), file=sys.stderr)
|
|
|
|
|
|
|
def is_whitespace(line):
    """Return a truthy match object iff *line* is non-empty and consists only of whitespace."""
    whitespace_only = re.match(r"^\s+$", line)
    return whitespace_only
|
|
|
def is_word(cols):
    """A regular word line has a plain positive-integer ID (no range, no decimal part)."""
    word_id_pattern = r"^[1-9][0-9]*$"
    return re.match(word_id_pattern, cols[ID])
|
|
|
def is_multiword_token(cols):
    """A multiword token line has a range ID of the form 'start-end'."""
    range_id_pattern = r"^[1-9][0-9]*-[1-9][0-9]*$"
    return re.match(range_id_pattern, cols[ID])
|
|
|
def is_empty_node(cols):
    """An empty node line has a decimal ID of the form 'word.index' (index >= 1)."""
    empty_id_pattern = r"^[0-9]+\.[1-9][0-9]*$"
    return re.match(empty_id_pattern, cols[ID])
|
|
|
def parse_empty_node_id(cols):
    """Return the (major, minor) string parts of an empty-node ID such as '3.1'."""
    parsed = re.match(r"^([0-9]+)\.([0-9]+)$", cols[ID])
    assert parsed, 'parse_empty_node_id with non-empty node'
    return parsed.groups()
|
|
|
def shorten(string):
    """Abbreviate *string* for error messages: strings of 25+ chars are cut to 20 plus '[...]'."""
    if len(string) < 25:
        return string
    return string[:20] + '[...]'
|
|
|
def lspec2ud(deprel):
    """Strip the language-specific subtype from a relation, e.g. 'acl:relcl' -> 'acl'."""
    universal, _sep, _subtype = deprel.partition(':')
    return universal
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Matches '# sent_id = X' comment lines; group 1 is the id. Raw string avoids
# the invalid '\s'/'\S' escape sequences of the original plain string literal.
sentid_re=re.compile(r'^# sent_id\s*=\s*(\S+)$')
|
def trees(inp, tag_sets, args):
    """
    `inp` a file-like object yielding lines as unicode
    `tag_sets` and `args` are needed for choosing the tests

    This function does elementary checking of the input and yields one
    sentence at a time from the input stream, as a (comments, lines) pair:
    `comments` is a list of raw comment lines, `lines` a list of token
    lines already split into their columns.
    """
    global curr_line, sentence_line, sentence_id
    comments=[] # comment lines collected before the current sentence
    lines=[] # token lines (split into columns) of the current sentence
    testlevel = 1
    testclass = 'Format'
    for line_counter, line in enumerate(inp):
        curr_line=line_counter+1 # 1-based line number for error messages
        line=line.rstrip(u"\n")
        if is_whitespace(line):
            testid = 'pseudo-empty-line'
            testmessage = 'Spurious line that appears empty but is not; there are whitespace characters.'
            warn(testmessage, testclass, testlevel=testlevel, testid=testid)
            # Pretend the line terminates the sentence anyway, to avoid
            # misleading follow-up errors.
            if lines:
                yield comments, lines
                comments=[]
                lines=[]
        elif not line: # a truly empty line terminates the sentence
            if lines:
                yield comments, lines
                comments=[]
                lines=[]
            else:
                testid = 'extra-empty-line'
                testmessage = 'Spurious empty line. Only one empty line is expected after every sentence.'
                warn(testmessage, testclass, testlevel=testlevel, testid=testid)
        elif line[0]=='#':
            # Remember the sentence id so subsequent error messages can cite it.
            match = sentid_re.match(line)
            if match:
                sentence_id = match.group(1)
            if not lines: # comments are only allowed before the sentence
                comments.append(line)
            else:
                testid = 'misplaced-comment'
                testmessage = 'Spurious comment line. Comments are only allowed before a sentence.'
                warn(testmessage, testclass, testlevel=testlevel, testid=testid)
        elif line[0].isdigit():
            validate_unicode_normalization(line)
            if not lines: # first token line of a new sentence
                sentence_line=curr_line
            cols=line.split(u"\t")
            if len(cols)!=COLCOUNT:
                testid = 'number-of-columns'
                testmessage = 'The line has %d columns but %d are expected. The contents of the columns will not be checked.' % (len(cols), COLCOUNT)
                warn(testmessage, testclass, testlevel=testlevel, testid=testid)
            # Lines with the wrong number of columns are not stored and their
            # contents are not checked further.
            else:
                lines.append(cols)
                validate_cols_level1(cols)
                if args.level > 1:
                    validate_cols(cols,tag_sets,args)
        else: # neither comment nor token line
            testid = 'invalid-line'
            testmessage = "Spurious line: '%s'. All non-empty lines should start with a digit or the # character." % (line)
            warn(testmessage, testclass, testlevel=testlevel, testid=testid)
    else: # for/else: we get here at the end of the input file
        if comments or lines: # the last sentence was not followed by an empty line
            testid = 'missing-empty-line'
            testmessage = 'Missing empty line after the last sentence.'
            warn(testmessage, testclass, testlevel=testlevel, testid=testid)
            yield comments, lines
|
|
|
|
|
|
|
def validate_unicode_normalization(text):
    """
    Tests that letters composed of multiple Unicode characters (such as a base
    letter plus combining diacritics) conform to NFC normalization (canonical
    decomposition followed by canonical composition).
    """
    normalized_text = unicodedata.normalize('NFC', text)
    if text != normalized_text:
        # Locate the first differing character so the message can point at it.
        firsti = -1
        firstj = -1
        inpfirst = ''
        nfcfirst = ''
        tcols = text.split("\t")
        ncols = normalized_text.split("\t")
        for i in range(len(tcols)):
            for j in range(len(tcols[i])):
                # NOTE(review): assumes ncols[i] is at least as long as tcols[i]
                # up to the first difference; an IndexError looks possible if
                # normalization shortens a column — confirm.
                if tcols[i][j] != ncols[i][j]:
                    firsti = i
                    firstj = j
                    # Human-readable Unicode character names for the report.
                    inpfirst = unicodedata.name(tcols[i][j])
                    nfcfirst = unicodedata.name(ncols[i][j])
                    break
            if firsti >= 0:
                break
        testlevel = 1
        testclass = 'Unicode'
        testid = 'unicode-normalization'
        testmessage = "Unicode not normalized: %s.character[%d] is %s, should be %s." % (COLNAMES[firsti], firstj, inpfirst, nfcfirst)
        warn(testmessage, testclass, testlevel=testlevel, testid=testid)
|
|
|
# Any whitespace character anywhere in the value. Raw strings avoid the
# invalid '\s' escape sequences of the original plain string literals.
whitespace_re=re.compile(r'.*\s',re.U)
# Two adjacent whitespace characters anywhere in the value.
whitespace2_re=re.compile(r'.*\s\s', re.U)
|
def validate_cols_level1(cols):
    """
    Tests that can run on a single line and pertain only to the CoNLL-U file
    format, not to predefined sets of UD tags.
    """
    testlevel = 1
    testclass = 'Format'
    # All columns: non-empty, no leading/trailing or doubled whitespace.
    for col_idx in range(MISC+1):
        if col_idx >= len(cols):
            break # too few columns; this has been reported in trees()
        # Must never be empty
        if not cols[col_idx]:
            testid = 'empty-column'
            testmessage = 'Empty value in column %s.' % (COLNAMES[col_idx])
            warn(testmessage, testclass, testlevel=testlevel, testid=testid)
        else:
            # Must never have leading or trailing whitespace
            if cols[col_idx][0].isspace():
                testid = 'leading-whitespace'
                testmessage = 'Leading whitespace not allowed in column %s.' % (COLNAMES[col_idx])
                warn(testmessage, testclass, testlevel=testlevel, testid=testid)
            if cols[col_idx][-1].isspace():
                testid = 'trailing-whitespace'
                testmessage = 'Trailing whitespace not allowed in column %s.' % (COLNAMES[col_idx])
                warn(testmessage, testclass, testlevel=testlevel, testid=testid)
            # Must never contain two consecutive whitespace characters
            if whitespace2_re.match(cols[col_idx]):
                testid = 'repeated-whitespace'
                testmessage = 'Two or more consecutive whitespace characters not allowed in column %s.' % (COLNAMES[col_idx])
                warn(testmessage, testclass, testlevel=testlevel, testid=testid)
    # These columns must not contain any whitespace at all
    # (FORM, LEMMA and MISC are deliberately not listed here).
    for col_idx in (ID,UPOS,XPOS,FEATS,HEAD,DEPREL,DEPS):
        if col_idx >= len(cols):
            break # too few columns; this has been reported in trees()
        if whitespace_re.match(cols[col_idx]):
            testid = 'invalid-whitespace'
            testmessage = "White space not allowed in column %s: '%s'." % (COLNAMES[col_idx], cols[col_idx])
            warn(testmessage, testclass, testlevel=testlevel, testid=testid)
    # The ID must be a word id, an empty node id, or a multiword token range.
    if not (is_word(cols) or is_empty_node(cols) or is_multiword_token(cols)):
        testid = 'invalid-word-id'
        testmessage = "Unexpected ID format '%s'." % cols[ID]
        warn(testmessage, testclass, testlevel=testlevel, testid=testid)
|
|
|
|
|
|
|
# Matches a multiword-token range ID such as '3-4'; groups 1 and 2 are the end points.
interval_re=re.compile('^([0-9]+)-([0-9]+)$',re.U)
|
def validate_ID_sequence(tree):
    """
    Validates that the ID sequence is correctly formed: words numbered 1..n,
    multiword ranges placed right before their first word, and empty nodes
    numbered word.1, word.2, ... directly after their anchor word.
    """
    testlevel = 1
    testclass = 'Format'
    words=[] # integer ids of regular word lines, in order of appearance
    tokens=[] # (begin, end) intervals covering the words
    current_word_id, next_empty_id = 0, 1
    for cols in tree:
        if not is_empty_node(cols):
            next_empty_id = 1    # reset the empty-node sub-numbering
        if is_word(cols):
            t_id=int(cols[ID])
            current_word_id = t_id
            words.append(t_id)
            # Not covered by an interval from a preceding multiword-token line?
            if not (tokens and tokens[-1][0]<=t_id and tokens[-1][1]>=t_id):
                tokens.append((t_id,t_id)) # then the word forms a default one-word interval
        elif is_multiword_token(cols):
            match=interval_re.match(cols[ID]) # parse the range
            if not match: # should not happen if is_multiword_token() matched
                testid = 'invalid-word-interval'
                testmessage = "Spurious word interval definition: '%s'." % cols[ID]
                warn(testmessage, testclass, testlevel=testlevel, testid=testid)
                continue
            beg,end=int(match.group(1)),int(match.group(2))
            # The range must start right after the last word seen so far.
            if not ((not words and beg >= 1) or (words and beg >= words[-1] + 1)):
                testid = 'misplaced-word-interval'
                testmessage = 'Multiword range not before its first word.'
                warn(testmessage, testclass, testlevel=testlevel, testid=testid)
                continue
            tokens.append((beg,end))
        elif is_empty_node(cols):
            word_id, empty_id = (int(i) for i in parse_empty_node_id(cols))
            # Empty nodes must follow their anchor word and count up from .1.
            if word_id != current_word_id or empty_id != next_empty_id:
                testid = 'misplaced-empty-node'
                testmessage = 'Empty node id %s, expected %d.%d' % (cols[ID], current_word_id, next_empty_id)
                warn(testmessage, testclass, testlevel=testlevel, testid=testid)
            next_empty_id += 1
    # The word ids must be exactly 1..n with no gaps or repetitions.
    wrdstrseq = ','.join(str(x) for x in words)
    expstrseq = ','.join(str(x) for x in range(1, len(words)+1))
    if wrdstrseq != expstrseq:
        testid = 'word-id-sequence'
        testmessage = "Words do not form a sequence. Got '%s'. Expected '%s'." % (wrdstrseq, expstrseq)
        warn(testmessage, testclass, testlevel=testlevel, testid=testid, lineno=False)
    # Elementary sanity of the collected intervals.
    for (b, e) in tokens:
        if e<b: # end before beginning
            testid = 'reversed-word-interval'
            testmessage = 'Spurious token interval %d-%d' % (b,e)
            warn(testmessage, testclass, testlevel=testlevel, testid=testid)
            continue
        if b<1 or e>len(words): # out of range
            testid = 'word-interval-out'
            testmessage = 'Spurious token interval %d-%d (out of range)' % (b,e)
            warn(testmessage, testclass, testlevel=testlevel, testid=testid)
            continue
|
|
|
def validate_token_ranges(tree):
    """
    Checks that the word ranges for multiword tokens are valid:
    each spans at least two words and no two ranges overlap.
    """
    testlevel = 1
    testclass = 'Format'
    covered = set() # word ids already covered by some multiword token range
    for cols in tree:
        if not is_multiword_token(cols):
            continue
        m = interval_re.match(cols[ID])
        if not m: # should not happen if is_multiword_token() matched
            testid = 'invalid-word-interval'
            testmessage = "Spurious word interval definition: '%s'." % cols[ID]
            warn(testmessage, testclass, testlevel=testlevel, testid=testid)
            continue
        start, end = m.groups()
        try:
            start, end = int(start), int(end)
        except ValueError:
            assert False, 'internal error' # the regex admits only digits
        if not start < end: # a range must span at least two words
            testid = 'reversed-word-interval'
            testmessage = 'Spurious token interval %d-%d' % (start, end)
            warn(testmessage, testclass, testlevel=testlevel, testid=testid)
            continue
        if covered & set(range(start, end+1)):
            testid = 'overlapping-word-intervals'
            testmessage = 'Range overlaps with others: %s' % cols[ID]
            warn(testmessage, testclass, testlevel=testlevel, testid=testid)
        covered |= set(range(start, end+1))
|
|
|
def validate_newlines(inp):
    """Report an error if the input stream saw any line terminator other than LF."""
    seen = inp.newlines
    if seen and seen != '\n':
        warn('Only the unix-style LF line terminator is allowed.', 'Format',
             testlevel=1, testid='non-unix-newline')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def validate_sent_id(comments,known_ids,lcode):
    """
    Checks that the sentence has exactly one well-formed sent_id comment and
    that the id is unique within `known_ids` (which is updated here). `lcode`
    is the language code; a single slash in the id is tolerated only for the
    codes 'ud' and 'shopen'.
    """
    testlevel = 2
    testclass = 'Metadata'
    matched=[]
    for c in comments:
        match=sentid_re.match(c)
        if match:
            matched.append(match)
        else:
            # A line that was meant to be a sent_id but does not match the pattern.
            if c.startswith('# sent_id') or c.startswith('#sent_id'):
                testid = 'invalid-sent-id'
                testmessage = "Spurious sent_id line: '%s' Should look like '# sent_id = xxxxx' where xxxxx is not whitespace. Forward slash reserved for special purposes." % c
                warn(testmessage, testclass, testlevel=testlevel, testid=testid)
    if not matched:
        testid = 'missing-sent-id'
        testmessage = 'Missing the sent_id attribute.'
        warn(testmessage, testclass, testlevel=testlevel, testid=testid)
    elif len(matched)>1:
        testid = 'multiple-sent-id'
        testmessage = 'Multiple sent_id attributes.'
        warn(testmessage, testclass, testlevel=testlevel, testid=testid)
    else:
        # Exactly one sent_id: check uniqueness and the slash restriction.
        sid=matched[0].group(1)
        if sid in known_ids:
            testid = 'non-unique-sent-id'
            testmessage = "Non-unique sent_id attribute '%s'." % sid
            warn(testmessage, testclass, testlevel=testlevel, testid=testid)
        if sid.count(u"/")>1 or (sid.count(u"/")==1 and lcode!=u"ud" and lcode!=u"shopen"):
            testid = 'slash-in-sent-id'
            testmessage = "The forward slash is reserved for special use in parallel treebanks: '%s'" % sid
            warn(testmessage, testclass, testlevel=testlevel, testid=testid)
        known_ids.add(sid)
|
|
|
# Matches '# text = ...' comment lines; group 1 is the sentence text. Raw
# string avoids the invalid '\s' escape sequences of the plain string literal.
text_re=re.compile(r'^# text\s*=\s*(.+)$')
|
def validate_text_meta(comments,tree):
    """
    Checks the '# text = ...' metadata: exactly one occurrence, no trailing
    whitespace, and that the FORM columns together with SpaceAfter=No flags
    in MISC reassemble exactly into the text attribute.
    Reads the global `args` (check_space_after).
    """
    testlevel = 2
    testclass = 'Metadata'
    matched=[]
    for c in comments:
        match=text_re.match(c)
        if match:
            matched.append(match)
    if not matched:
        testid = 'missing-text'
        testmessage = 'Missing the text attribute.'
        warn(testmessage, testclass, testlevel=testlevel, testid=testid)
    elif len(matched)>1:
        testid = 'multiple-text'
        testmessage = 'Multiple text attributes.'
        warn(testmessage, testclass, testlevel=testlevel, testid=testid)
    else:
        stext=matched[0].group(1)
        if stext[-1].isspace():
            testid = 'text-trailing-whitespace'
            testmessage = 'The text attribute must not end with whitespace.'
            warn(testmessage, testclass, testlevel=testlevel, testid=testid)
        # Validate the text against the FORM columns and SpaceAfter flags.
        skip_words=set() # ids of words covered by a multiword token; their FORMs are not part of the text
        mismatch_reported=0 # report only the first mismatch per sentence
        for cols in tree:
            if MISC >= len(cols):
                # Too few columns; this has been reported elsewhere.
                continue
            if 'NoSpaceAfter=Yes' in cols[MISC]: # substring check on purpose, to catch any combination
                testid = 'nospaceafter-yes'
                testmessage = "'NoSpaceAfter=Yes' should be replaced with 'SpaceAfter=No'."
                warn(testmessage, testclass, testlevel=testlevel, testid=testid)
            if '.' in cols[ID]: # empty node
                if 'SpaceAfter=No' in cols[MISC]:
                    testid = 'spaceafter-empty-node'
                    testmessage = "'SpaceAfter=No' cannot occur with empty nodes."
                    warn(testmessage, testclass, testlevel=testlevel, testid=testid)
                continue
            elif '-' in cols[ID]: # multi-word token
                beg,end=cols[ID].split('-')
                try:
                    begi,endi = int(beg),int(end)
                except ValueError as e:
                    # Malformed range: make the loop below a no-op.
                    begi,endi=1,0
                # The words inside the range are represented by the token's FORM,
                # so they are skipped below and must not carry SpaceAfter=No.
                for i in range(begi, endi+1):
                    skip_words.add(str(i))
            elif cols[ID] in skip_words:
                if 'SpaceAfter=No' in cols[MISC]:
                    testid = 'spaceafter-mwt-node'
                    testmessage = "'SpaceAfter=No' cannot occur with words that are part of a multi-word token."
                    warn(testmessage, testclass, testlevel=testlevel, testid=testid)
                continue
            else:
                # A regular word that is also a token in its entirety.
                pass
            # Consume this FORM from the front of the remaining text.
            if not stext.startswith(cols[FORM]):
                if not mismatch_reported:
                    testid = 'text-form-mismatch'
                    testmessage = "Mismatch between the text attribute and the FORM field. Form[%s] is '%s' but text is '%s...'" % (cols[ID], cols[FORM], stext[:len(cols[FORM])+20])
                    warn(testmessage, testclass, testlevel=testlevel, testid=testid, lineno=False)
                    mismatch_reported=1
            else:
                stext=stext[len(cols[FORM]):] # eat the form
                if 'SpaceAfter=No' not in cols[MISC].split("|"):
                    if args.check_space_after and (stext) and not stext[0].isspace():
                        testid = 'missing-spaceafter'
                        testmessage = "'SpaceAfter=No' is missing in the MISC field of node #%s because the text is '%s'." % (cols[ID], shorten(cols[FORM]+stext))
                        warn(testmessage, testclass, testlevel=testlevel, testid=testid)
                    stext=stext.lstrip() # eat the whitespace between forms
        if stext:
            testid = 'text-extra-chars'
            testmessage = "Extra characters at the end of the text attribute, not accounted for in the FORM fields: '%s'" % stext
            warn(testmessage, testclass, testlevel=testlevel, testid=testid)
|
|
|
|
|
|
|
def validate_cols(cols, tag_sets, args):
    """
    All tests that can run on a single line. Done as soon as the line is read,
    called from trees() if level>1.
    """
    if is_word(cols) or is_empty_node(cols):
        validate_character_constraints(cols)
        validate_features(cols, tag_sets, args)
        validate_upos(cols,tag_sets)
    elif is_multiword_token(cols):
        validate_token_empty_vals(cols)
    # else: wrong ID format; already reported by the level-1 checks
    if is_word(cols):
        validate_deprels(cols, tag_sets, args)
    elif is_empty_node(cols):
        validate_empty_node_empty_vals(cols)
    if args.level > 3:
        # validate_whitespace is defined elsewhere in this file.
        validate_whitespace(cols, tag_sets)
|
|
|
def validate_token_empty_vals(cols):
    """
    Checks that a multi-word token has _ empty values in all fields except MISC.
    This is required by UD guidelines although it is not a problem in general,
    therefore a level 2 test.
    """
    assert is_multiword_token(cols), 'internal error'
    # Columns LEMMA..DEPS must be empty; FORM and MISC are exempt.
    for idx in range(LEMMA, MISC):
        value = cols[idx]
        if value == '_':
            continue
        message = "A multi-word token line must have '_' in the column %s. Now: '%s'." % (COLNAMES[idx], value)
        warn(message, 'Format', testlevel=2, testid='mwt-nonempty-field')
|
|
|
def validate_empty_node_empty_vals(cols):
    """
    Checks that an empty node has _ empty values in HEAD and DEPREL. This is
    required by UD guidelines but not necessarily by CoNLL-U, therefore
    a level 2 test.
    """
    assert is_empty_node(cols), 'internal error'
    # NOTE(review): the testid reuses 'mwt-nonempty-field' even though this is
    # an empty node, not a multiword token — kept for behavioral compatibility.
    for idx in (HEAD, DEPREL):
        value = cols[idx]
        if value == '_':
            continue
        message = "An empty node must have '_' in the column %s. Now: '%s'." % (COLNAMES[idx], value)
        warn(message, 'Format', testlevel=2, testid='mwt-nonempty-field')
|
|
|
|
|
|
|
|
|
|
|
|
|
# Lowercase letters/marks in any script (Unicode properties require the
# `regex` module), possibly joined by underscores — the lexical part of an
# enhanced relation. Raw string keeps the '\p' escapes intact (they are
# invalid escape sequences in a plain string literal); the stray trailing
# semicolon of the original is also removed.
edeprelpart_resrc = r'[\p{Ll}\p{Lm}\p{Lo}\p{M}]+(_[\p{Ll}\p{Lm}\p{Lo}\p{M}]+)*'
# Full enhanced deprel: universal relation, optional subtype, optional
# lexical part, optional further subtype.
edeprel_resrc = '^[a-z]+(:[a-z]+)?(:' + edeprelpart_resrc + ')?(:[a-z]+)?$'
edeprel_re = re.compile(edeprel_resrc, re.U)
|
def validate_character_constraints(cols):
    """
    Checks general constraints on valid characters, e.g. that UPOS
    only contains [A-Z]. Also checks that DEPREL and the enhanced relation
    types in DEPS use only permitted characters.
    """
    testlevel = 2
    if is_multiword_token(cols):
        return
    if UPOS >= len(cols):
        return # too few columns; this has been reported elsewhere
    # UPOS must be all uppercase ASCII; empty nodes may leave it as '_'.
    if not (re.match(r"^[A-Z]+$", cols[UPOS]) or (is_empty_node(cols) and cols[UPOS] == '_')):
        testclass = 'Morpho'
        testid = 'invalid-upos'
        testmessage = "Invalid UPOS value '%s'." % cols[UPOS]
        warn(testmessage, testclass, testlevel=testlevel, testid=testid)
    # DEPREL: lowercase universal relation with at most one lowercase subtype.
    if not (re.match(r"^[a-z]+(:[a-z]+)?$", cols[DEPREL]) or (is_empty_node(cols) and cols[DEPREL] == '_')):
        testclass = 'Syntax'
        testid = 'invalid-deprel'
        testmessage = "Invalid DEPREL value '%s'." % cols[DEPREL]
        warn(testmessage, testclass, testlevel=testlevel, testid=testid)
    try:
        deps = deps_list(cols)
    except ValueError:
        testclass = 'Enhanced'
        testid = 'invalid-deps'
        testmessage = "Failed to parse DEPS: '%s'." % cols[DEPS]
        warn(testmessage, testclass, testlevel=testlevel, testid=testid)
        return
    # Fix: reuse the already-parsed list instead of calling deps_list() a
    # second time; also guard against deps being None (deps_list returns None
    # when the line has no DEPS column), which previously raised TypeError.
    if deps and any(deprel for head, deprel in deps
                    if not edeprel_re.match(deprel)):
        testclass = 'Enhanced'
        testid = 'invalid-edeprel'
        testmessage = "Invalid enhanced relation type: '%s'." % cols[DEPS]
        warn(testmessage, testclass, testlevel=testlevel, testid=testid)
|
|
|
# Feature=Value(s): the attribute starts with [A-Z0-9], may carry a layered
# suffix such as '[psor]'; values are comma-separated. Raw strings avoid the
# invalid '\[' / '\]' escape sequences of the original plain string literals.
attr_val_re=re.compile(r'^([A-Z0-9][A-Z0-9a-z]*(?:\[[a-z0-9]+\])?)=(([A-Z0-9][A-Z0-9a-z]*)(,([A-Z0-9][A-Z0-9a-z]*))*)$',re.U)
# A single feature value: starts with [A-Z0-9], continues with [A-Za-z0-9].
val_re=re.compile(r'^[A-Z0-9][A-Z0-9a-z]*',re.U)
|
def validate_features(cols, tag_sets, args):
    """
    Checks general constraints on feature-value format. On level 4 and higher,
    also checks that a feature-value pair is listed as approved. (Every pair
    must be allowed on level 2 because it could be defined as language-specific.
    To disallow non-universal features, test on level 4 with language 'ud'.)

    NOTE(review): returns True when FEATS is '_' but None on all other paths;
    callers apparently ignore the return value — confirm.
    """
    testclass = 'Morpho'
    if FEATS >= len(cols):
        return # too few columns; this has been reported elsewhere
    feats=cols[FEATS]
    if feats == '_':
        return True
    # Feature=Value pairs are separated by '|' and must be sorted (case-insensitively).
    feat_list=feats.split('|')
    if [f.lower() for f in feat_list]!=sorted(f.lower() for f in feat_list):
        testlevel = 2
        testid = 'unsorted-features'
        testmessage = "Morphological features must be sorted: '%s'." % feats
        warn(testmessage, testclass, testlevel=testlevel, testid=testid)
    attr_set=set() # gather attribute names to check for duplicates at the end
    for f in feat_list:
        match=attr_val_re.match(f)
        if match is None:
            testlevel = 2
            testid = 'invalid-feature'
            testmessage = "Spurious morphological feature: '%s'. Should be of the form Feature=Value and must start with [A-Z0-9] and only contain [A-Za-z0-9]." % f
            warn(testmessage, testclass, testlevel=testlevel, testid=testid)
            attr_set.add(f) # prevent a misleading 'repeated-feature' error below
        else:
            # Multiple values of one feature must be unique and sorted.
            attr=match.group(1)
            attr_set.add(attr)
            values=match.group(2).split(',')
            if len(values) != len(set(values)):
                testlevel = 2
                testid = 'repeated-feature-value'
                testmessage = "Repeated feature values are disallowed: '%s'" % feats
                warn(testmessage, testclass, testlevel=testlevel, testid=testid)
            if [v.lower() for v in values] != sorted(v.lower() for v in values):
                testlevel = 2
                testid = 'unsorted-feature-values'
                testmessage = "If a feature has multiple values, these must be sorted: '%s'" % f
                warn(testmessage, testclass, testlevel=testlevel, testid=testid)
            for v in values:
                if not val_re.match(v):
                    testlevel = 2
                    testid = 'invalid-feature-value'
                    testmessage = "Spurious value '%s' in '%s'. Must start with [A-Z0-9] and only contain [A-Za-z0-9]." % (v, f)
                    warn(testmessage, testclass, testlevel=testlevel, testid=testid)
                # Level 4: the pair must appear in the approved list, if one was supplied.
                if args.level > 3 and tag_sets[FEATS] is not None and attr+'='+v not in tag_sets[FEATS]:
                    warn_on_missing_files.add("feat_val")
                    testlevel = 4
                    testid = 'unknown-feature-value'
                    testmessage = "Unknown feature-value pair '%s=%s'." % (attr, v)
                    warn(testmessage, testclass, testlevel=testlevel, testid=testid)
    if len(attr_set) != len(feat_list):
        testlevel = 2
        testid = 'repeated-feature'
        testmessage = "Repeated features are disallowed: '%s'." % feats
        warn(testmessage, testclass, testlevel=testlevel, testid=testid)
|
|
|
def validate_upos(cols, tag_sets):
    """Check UPOS against the set of known tags, when such a set was provided."""
    if UPOS >= len(cols):
        return # too few columns; reported elsewhere
    upos = cols[UPOS]
    # Empty nodes may leave UPOS unspecified.
    if is_empty_node(cols) and upos == '_':
        return
    known = tag_sets[UPOS]
    if known is None or upos in known:
        return
    warn("Unknown UPOS tag: '%s'." % upos, 'Morpho',
         testlevel=2, testid='unknown-upos')
|
|
|
def validate_deprels(cols, tag_sets, args):
    """
    Checks DEPREL and the relation types in DEPS against the permitted sets
    in `tag_sets`. Below level 4, language-specific subtypes are stripped
    before the lookup and errors are reported at level 2.
    """
    if DEPREL >= len(cols):
        return # too few columns; this has been reported elsewhere
    # Below level 4, test only the universal part of the relation.
    deprel = cols[DEPREL]
    testlevel = 4
    if args.level < 4:
        deprel = lspec2ud(deprel)
        testlevel = 2
    if tag_sets[DEPREL] is not None and deprel not in tag_sets[DEPREL]:
        warn_on_missing_files.add("deprel")
        testclass = 'Syntax'
        testid = 'unknown-deprel'
        testmessage = "Unknown DEPREL label: '%s'" % cols[DEPREL]
        warn(testmessage, testclass, testlevel=testlevel, testid=testid)
    if DEPS >= len(cols):
        return # too few columns; this has been reported elsewhere
    # Check every head:deprel pair of the enhanced graph.
    if tag_sets[DEPS] is not None and cols[DEPS] != '_':
        for head_deprel in cols[DEPS].split('|'):
            try:
                head,deprel=head_deprel.split(':', 1)
            except ValueError:
                testclass = 'Enhanced'
                testid = 'invalid-head-deprel'
                testmessage = "Malformed head:deprel pair '%s'." % head_deprel
                warn(testmessage, testclass, testlevel=testlevel, testid=testid)
                continue
            if args.level < 4:
                deprel = lspec2ud(deprel)
            if deprel not in tag_sets[DEPS]:
                warn_on_missing_files.add("edeprel")
                testclass = 'Enhanced'
                testid = 'unknown-edeprel'
                testmessage = "Unknown enhanced relation type '%s' in '%s'" % (deprel, head_deprel)
                warn(testmessage, testclass, testlevel=testlevel, testid=testid)
|
|
|
|
|
|
|
def subset_to_words_and_empty_nodes(tree):
    """
    Only picks word and empty node lines, skips multiword token lines.
    """
    selected = []
    for cols in tree:
        if is_word(cols) or is_empty_node(cols):
            selected.append(cols)
    return selected
|
|
|
def deps_list(cols):
    """Parse the DEPS column into a list of [head, deprel] pairs.

    Returns None when the line is too short to have a DEPS column; raises
    ValueError when some pair lacks the ':' separator.
    """
    if DEPS >= len(cols):
        return
    raw = cols[DEPS]
    if raw == '_':
        return []
    pairs = [item.split(':', 1) for item in raw.split('|')]
    for pair in pairs:
        if len(pair) != 2:
            raise ValueError('malformed DEPS: %s' % raw)
    return pairs
|
|
|
# Basic HEAD: 0 (root) or a positive integer without leading zeros.
basic_head_re = re.compile(r'^(0|[1-9][0-9]*)$', re.U)
# Enhanced head: the same, optionally followed by an empty-node suffix '.N'.
# Raw string avoids the invalid '\.' escape of the original plain string.
enhanced_head_re = re.compile(r'^(0|[1-9][0-9]*)(\.[1-9][0-9]*)?$', re.U)
|
def validate_ID_references(tree):
    """
    Validates that HEAD and DEPS reference existing IDs.
    """
    testlevel = 2
    word_tree = subset_to_words_and_empty_nodes(tree)
    ids = set([cols[ID] for cols in word_tree])
    for cols in word_tree:
        if HEAD >= len(cols):
            # NOTE(review): `return` (not `continue`) abandons the remaining
            # lines of the sentence; presumably the short line was already
            # reported elsewhere — confirm this is intentional.
            return
        # Basic HEAD is only expected on regular words, not on empty nodes.
        if not is_empty_node(cols):
            match = basic_head_re.match(cols[HEAD])
            if match is None:
                testclass = 'Format'
                testid = 'invalid-head'
                testmessage = "Invalid HEAD: '%s'." % cols[HEAD]
                warn(testmessage, testclass, testlevel=testlevel, testid=testid)
            if not (cols[HEAD] in ids or cols[HEAD] == '0'):
                testclass = 'Syntax'
                testid = 'unknown-head'
                testmessage = "Undefined HEAD (no such ID): '%s'." % cols[HEAD]
                warn(testmessage, testclass, testlevel=testlevel, testid=testid)
        if DEPS >= len(cols):
            return # see the NOTE above about `return` vs `continue`
        try:
            deps = deps_list(cols)
        except ValueError:
            # Malformed DEPS value.
            testclass = 'Format'
            testid = 'invalid-deps'
            testmessage = "Failed to parse DEPS: '%s'." % cols[DEPS]
            warn(testmessage, testclass, testlevel=testlevel, testid=testid)
            continue
        for head, deprel in deps:
            match = enhanced_head_re.match(head)
            if match is None:
                testclass = 'Format'
                testid = 'invalid-ehead'
                testmessage = "Invalid enhanced head reference: '%s'." % head
                warn(testmessage, testclass, testlevel=testlevel, testid=testid)
            if not (head in ids or head == '0'):
                testclass = 'Enhanced'
                testid = 'unknown-ehead'
                testmessage = "Undefined enhanced head reference (no such ID): '%s'." % head
                warn(testmessage, testclass, testlevel=testlevel, testid=testid)
|
|
|
def validate_root(tree):
    """
    Checks that DEPREL is "root" iff HEAD is 0, both in the basic tree and
    in the enhanced graph (DEPS).
    """
    testlevel = 2
    for cols in tree:
        # Basic tree: only regular words carry HEAD/DEPREL.
        if is_word(cols):
            if HEAD >= len(cols):
                continue # too few columns; reported elsewhere
            if cols[HEAD] == '0' and lspec2ud(cols[DEPREL]) != 'root':
                testclass = 'Syntax'
                testid = '0-is-not-root'
                testmessage = "DEPREL must be 'root' if HEAD is 0."
                warn(testmessage, testclass, testlevel=testlevel, testid=testid)
            if cols[HEAD] != '0' and lspec2ud(cols[DEPREL]) == 'root':
                testclass = 'Syntax'
                testid = 'root-is-not-0'
                testmessage = "DEPREL cannot be 'root' if HEAD is not 0."
                warn(testmessage, testclass, testlevel=testlevel, testid=testid)
        # Enhanced graph: both words and empty nodes may carry DEPS.
        if is_word(cols) or is_empty_node(cols):
            if DEPS >= len(cols):
                continue # too few columns; reported elsewhere
            try:
                deps = deps_list(cols)
            except ValueError:
                # Malformed DEPS value.
                testclass = 'Format'
                testid = 'invalid-deps'
                testmessage = "Failed to parse DEPS: '%s'." % cols[DEPS]
                warn(testmessage, testclass, testlevel=testlevel, testid=testid)
                continue
            for head, deprel in deps:
                if head == '0' and lspec2ud(deprel) != 'root':
                    testclass = 'Enhanced'
                    testid = 'enhanced-0-is-not-root'
                    testmessage = "Enhanced relation type must be 'root' if head is 0."
                    warn(testmessage, testclass, testlevel=testlevel, testid=testid)
                if head != '0' and lspec2ud(deprel) == 'root':
                    testclass = 'Enhanced'
                    testid = 'enhanced-root-is-not-0'
                    testmessage = "Enhanced relation type cannot be 'root' if head is not 0."
                    warn(testmessage, testclass, testlevel=testlevel, testid=testid)
|
|
|
def validate_deps(tree):
    """
    Validates that DEPS is correctly formatted and that there are no
    self-loops in DEPS.
    """
    testlevel = 2
    # Reconstruct each node's input line number; assumes the sentence's lines
    # are contiguous starting at sentence_line.
    node_line = sentence_line - 1
    for cols in tree:
        node_line += 1
        if not (is_word(cols) or is_empty_node(cols)):
            continue
        if DEPS >= len(cols):
            continue # too few columns; reported elsewhere
        try:
            deps = deps_list(cols)
            # Heads as floats so that empty-node ids like '3.1' sort numerically.
            heads = [float(h) for h, d in deps]
        except ValueError:
            # Malformed DEPS value.
            testclass = 'Format'
            testid = 'invalid-deps'
            testmessage = "Failed to parse DEPS: '%s'." % cols[DEPS]
            warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodelineno=node_line)
            return
        if heads != sorted(heads):
            testclass = 'Format'
            testid = 'unsorted-deps'
            testmessage = "DEPS not sorted by head index: '%s'" % cols[DEPS]
            warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodelineno=node_line)
        else:
            # Heads are sorted; within one head, relations must be sorted and unique.
            lasth = None
            lastd = None
            for h, d in deps:
                if h == lasth:
                    if d < lastd:
                        testclass = 'Format'
                        testid = 'unsorted-deps-2'
                        testmessage = "DEPS pointing to head '%s' not sorted by relation type: '%s'" % (h, cols[DEPS])
                        warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodelineno=node_line)
                    elif d == lastd:
                        testclass = 'Format'
                        testid = 'repeated-deps'
                        testmessage = "DEPS contain multiple instances of the same relation '%s:%s'" % (h, d)
                        warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodelineno=node_line)
                lasth = h
                lastd = d
        # Self-loop check: the node must not appear among its own enhanced heads.
        try:
            id_ = float(cols[ID])
        except ValueError:
            # Malformed ID; presumably reported previously.
            return
        if id_ in heads:
            testclass = 'Enhanced'
            testid = 'deps-self-loop'
            testmessage = "Self-loop in DEPS for '%s'" % cols[ID]
            warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodelineno=node_line)
|
|
|
def validate_misc(tree):
    """
    In general, the MISC column can contain almost anything. However, if there
    is a vertical bar character, it is interpreted as the separator of two
    MISC attributes, which may or may not have the form of attribute=value pair.
    In general it is not forbidden that the same attribute appears several times
    with different values, but this should not happen for selected attributes
    that are described in the UD documentation.
    """
    testlevel = 2
    testclass = 'Format'
    # Reconstruct each node's input line number; assumes contiguous lines.
    node_line = sentence_line - 1
    for cols in tree:
        node_line += 1
        if not (is_word(cols) or is_empty_node(cols)):
            continue
        if MISC >= len(cols):
            continue # too few columns; reported elsewhere
        if cols[MISC] == '_':
            continue
        # Split into attributes; an attribute without '=' stays a one-element list.
        misc = [ma.split('=', 1) for ma in cols[MISC].split('|')]
        mamap = {} # occurrence counts of the regulated attributes
        for ma in misc:
            if re.match(r"^(SpaceAfter|Translit|LTranslit|Gloss|LId|LDeriv)$", ma[0]):
                mamap.setdefault(ma[0], 0)
                mamap[ma[0]] = mamap[ma[0]] + 1
        for a in list(mamap):
            if mamap[a] > 1:
                testid = 'repeated-misc'
                testmessage = "MISC attribute '%s' not supposed to occur twice" % a
                warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodelineno=node_line)
|
|
|
def build_tree(sentence):
    """
    Take the list of non-comment lines (line = list of columns) describing
    a sentence and return a dictionary with items providing easier access to
    the tree structure. In case of fatal problems (missing HEAD etc.) return
    None but do not report the error (presumably it has already been reported).

    tree ... dictionary:
      nodes ... array of word lines, i.e., lists of columns;
          mwt and empty nodes are skipped, indices equal to ids (nodes[0] is empty)
      children ... array of sorted lists of children indices (numbers, not strings);
          indices to this array equal to ids (children[0] are the children of the root)
      linenos ... array of line numbers in the file, corresponding to nodes
          (needed in error messages)
    """
    testlevel = 2
    testclass = 'Syntax'
    global sentence_line
    node_line = sentence_line - 1
    children = {}
    tree = {
        'nodes': [['0', '_', '_', '_', '_', '_', '_', '_', '_', '_']],  # artificial root node
        'children': [],
        'linenos': [sentence_line]
    }
    for cols in sentence:
        node_line += 1
        # Multiword token and empty node lines do not take part in the basic tree.
        if not is_word(cols):
            continue
        # Incomplete lines and unparsable ID/HEAD have presumably been reported
        # already by lower-level checks; just give up quietly here.
        if MISC >= len(cols):
            return None
        try:
            id_ = int(cols[ID])
        except ValueError:
            return None
        try:
            head = int(cols[HEAD])
        except ValueError:
            return None
        if head == id_:
            testid = 'head-self-loop'
            testmessage = 'HEAD == ID for %s' % cols[ID]
            warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodelineno=node_line)
            return None
        tree['nodes'].append(cols)
        tree['linenos'].append(node_line)
        # Incrementally build the sets of children of every node.
        children.setdefault(cols[HEAD], set()).add(id_)
    for cols in tree['nodes']:
        tree['children'].append(sorted(children.get(cols[ID], [])))
    # Optionally check that there is exactly one word attached to the root.
    if len(tree['children'][0]) > 1 and args.single_root:
        testid = 'multiple-roots'
        testmessage = "Multiple root words: %s" % tree['children'][0]
        warn(testmessage, testclass, testlevel=testlevel, testid=testid, lineno=False)
        return None
    # Return None if there are any cycles, i.e., nodes not reachable from the root.
    projection = set()
    get_projection(0, tree, projection)
    # BUG FIX: word ids run from 1 to len(tree['nodes']) - 1 inclusive, so the
    # upper bound of the range must be len(tree['nodes']); the previous bound
    # (len(tree['nodes']) - 1) silently omitted the last word from the report.
    unreachable = set(range(1, len(tree['nodes']))) - projection
    if unreachable:
        testid = 'non-tree'
        testmessage = 'Non-tree structure. Words %s are not reachable from the root 0.' % (','.join(str(w) for w in sorted(unreachable)))
        warn(testmessage, testclass, testlevel=testlevel, testid=testid, lineno=False)
        return None
    return tree
|
|
|
def get_projection(id, tree, projection):
    """
    Like proj() above, but works with the tree data structure. Collects the
    ids of all descendants of node 'id' in the set called projection (the set
    is modified in place and also returned; 'id' itself is not added).
    """
    # Iterative depth-first traversal; the membership test doubles as cycle
    # protection on malformed input.
    pending = [id]
    while pending:
        current = pending.pop()
        for child in tree['children'][current]:
            if child not in projection:
                projection.add(child)
                pending.append(child)
    return projection
|
|
|
def build_egraph(sentence):
    """
    Takes the list of non-comment lines (line = list of columns) describing
    a sentence. Returns a dictionary with items providing easier access to the
    enhanced graph structure. In case of fatal problems returns None
    but does not report the error (presumably it has already been reported).
    However, once the graph has been found and built, this function verifies
    that the graph is connected and generates an error if it is not.

    egraph ... dictionary:
      nodes ... dictionary of dictionaries, each corresponding to a word or an
          empty node; mwt lines are skipped
          keys equal to node ids (i.e. strings that look like integers or
          decimal numbers; key 0 is the artificial root node)
          value is a dictionary-record:
            cols ... array of column values from the input line corresponding to the node
            children ... set of children ids (strings)
            lineno ... line number in the file (needed in error messages)
    """
    global sentence_line
    node_line = sentence_line - 1
    # Enhanced dependencies are optional; remember whether we saw any evidence
    # of them (an empty node or a non-empty DEPS column).
    egraph_exists = False
    rootnode = {
        'cols': ['0', '_', '_', '_', '_', '_', '_', '_', '_', '_'],
        'deps': [],
        'parents': set(),
        'children': set(),
        'lineno': sentence_line
    }
    egraph = {
        '0': rootnode  # artificial root node
    }
    nodeids = set()
    for cols in sentence:
        node_line += 1
        if is_multiword_token(cols):
            continue
        # Incomplete or unparsable lines have presumably been reported already;
        # give up quietly.
        if MISC >= len(cols):
            return None
        try:
            deps = deps_list(cols)
            heads = [h for h, d in deps]
        except ValueError:
            return None
        if is_empty_node(cols):
            egraph_exists = True
        nodeids.add(cols[ID])
        # The node may have already been added as the head of an earlier
        # dependent, hence setdefault rather than plain assignment.
        egraph.setdefault(cols[ID], {})
        egraph[cols[ID]]['cols'] = cols
        # FIX: reuse the deps/heads parsed above instead of re-parsing the
        # DEPS column a second (and third) time.
        egraph[cols[ID]]['deps'] = deps
        egraph[cols[ID]]['parents'] = set(heads)
        egraph[cols[ID]].setdefault('children', set())
        egraph[cols[ID]]['lineno'] = node_line
        # Incrementally build the sets of children of every node.
        for h in heads:
            egraph_exists = True
            egraph.setdefault(h, {})
            egraph[h].setdefault('children', set()).add(cols[ID])
    # No enhanced annotation in this sentence: nothing to validate.
    if not egraph_exists:
        return None
    # Verify that the enhanced graph is connected: every node must be
    # reachable from the artificial root '0'.
    projection = set()
    get_graph_projection('0', egraph, projection)
    unreachable = nodeids - projection
    if unreachable:
        sur = sorted(unreachable)
        testlevel = 2
        testclass = 'Enhanced'
        testid = 'unconnected-egraph'
        testmessage = "Enhanced graph is not connected. Nodes %s are not reachable from any root" % sur
        warn(testmessage, testclass, testlevel=testlevel, testid=testid, lineno=False)
        return None
    return egraph
|
|
|
def get_graph_projection(id, graph, projection):
    """
    Collect into 'projection' the ids of all nodes reachable from node 'id'
    via the children links of the enhanced graph. The set is modified in
    place and also returned; 'id' itself is added only if a cycle leads back
    to it.
    """
    # Iterative traversal with an explicit worklist; already-visited nodes are
    # skipped, so cycles terminate.
    worklist = [id]
    while worklist:
        current = worklist.pop()
        for child in graph[current]['children']:
            if child not in projection:
                projection.add(child)
                worklist.append(child)
    return projection
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def validate_upos_vs_deprel(id, tree):
    """
    For certain relations checks that the dependent word belongs to an expected
    part-of-speech category. Occasionally we may have to check the children of
    the node, too.

    id ..... integer id of the node to check (index into tree['nodes'])
    tree ... tree dictionary as produced by build_tree()
    """
    testlevel = 3
    testclass = 'Syntax'
    cols = tree['nodes'][id]
    # Language-specific relation subtypes are stripped so that only the
    # universal part of the label is tested.
    deprel = lspec2ud(cols[DEPREL])
    # Universal relation labels of this node's children; several checks below
    # are relaxed when a 'fixed' (or 'goeswith') child is present.
    childrels = set([lspec2ud(tree['nodes'][x][DEPREL]) for x in tree['children'][id]])
    # 'det' dependents are expected to be determiners or pronouns.
    if deprel == 'det' and not re.match(r"^(DET|PRON)", cols[UPOS]) and not 'fixed' in childrels:
        testid = 'rel-upos-det'
        testmessage = "'det' should be 'DET' or 'PRON' but it is '%s'" % (cols[UPOS])
        warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodeid=id, nodelineno=tree['linenos'][id])
    # 'nummod' dependents are numerals; NOUN and SYM are also tolerated
    # (note the anchored regex: exact match required here).
    if deprel == 'nummod' and not re.match(r"^(NUM|NOUN|SYM)$", cols[UPOS]):
        testid = 'rel-upos-nummod'
        testmessage = "'nummod' should be 'NUM' but it is '%s'" % (cols[UPOS])
        warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodeid=id, nodelineno=tree['linenos'][id])
    # 'advmod' allows several adverb-like tags; exceptions with 'fixed' or
    # 'goeswith' children are skipped.
    if deprel == 'advmod' and not re.match(r"^(ADV|ADJ|CCONJ|DET|PART|SYM)", cols[UPOS]) and not 'fixed' in childrels and not 'goeswith' in childrels:
        testid = 'rel-upos-advmod'
        testmessage = "'advmod' should be 'ADV' but it is '%s'" % (cols[UPOS])
        warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodeid=id, nodelineno=tree['linenos'][id])
    # Expletives are pronouns, determiners or particles (exact match).
    if deprel == 'expl' and not re.match(r"^(PRON|DET|PART)$", cols[UPOS]):
        testid = 'rel-upos-expl'
        testmessage = "'expl' should normally be 'PRON' but it is '%s'" % (cols[UPOS])
        warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodeid=id, nodelineno=tree['linenos'][id])
    # Auxiliary dependents must be tagged AUX.
    if deprel == 'aux' and not re.match(r"^(AUX)", cols[UPOS]):
        testid = 'rel-upos-aux'
        testmessage = "'aux' should be 'AUX' but it is '%s'" % (cols[UPOS])
        warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodeid=id, nodelineno=tree['linenos'][id])
    # Copulas may also be pronominal in some languages, hence the wider set.
    if deprel == 'cop' and not re.match(r"^(AUX|PRON|DET|SYM)", cols[UPOS]):
        testid = 'rel-upos-cop'
        testmessage = "'cop' should be 'AUX' or 'PRON'/'DET' but it is '%s'" % (cols[UPOS])
        warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodeid=id, nodelineno=tree['linenos'][id])
    # Conversely, AUX must not participate in compounds.
    if deprel == 'compound' and re.match(r"^(AUX)", cols[UPOS]):
        testid = 'rel-upos-compound'
        testmessage = "'compound' should not be 'AUX'"
        warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodeid=id, nodelineno=tree['linenos'][id])
    # Adpositional markers ('case') must not come from these content-like
    # categories, unless part of a fixed expression.
    if deprel == 'case' and re.match(r"^(PROPN|ADJ|PRON|DET|NUM|AUX)", cols[UPOS]) and not 'fixed' in childrels:
        testid = 'rel-upos-case'
        testmessage = "'case' should not be '%s'" % (cols[UPOS])
        warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodeid=id, nodelineno=tree['linenos'][id])
    # Subordinating markers ('mark') likewise exclude content categories.
    if deprel == 'mark' and re.match(r"^(NOUN|PROPN|ADJ|PRON|DET|NUM|VERB|AUX|INTJ)", cols[UPOS]) and not 'fixed' in childrels:
        testid = 'rel-upos-mark'
        testmessage = "'mark' should not be '%s'" % (cols[UPOS])
        warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodeid=id, nodelineno=tree['linenos'][id])
    # Coordinating conjunctions ('cc') get the same exclusion list as 'mark'.
    if deprel == 'cc' and re.match(r"^(NOUN|PROPN|ADJ|PRON|DET|NUM|VERB|AUX|INTJ)", cols[UPOS]) and not 'fixed' in childrels:
        testid = 'rel-upos-cc'
        testmessage = "'cc' should not be '%s'" % (cols[UPOS])
        warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodeid=id, nodelineno=tree['linenos'][id])
    # 'punct' and PUNCT must imply each other. Note that the raw (possibly
    # subtyped) DEPREL is used for the first test.
    if cols[DEPREL] == 'punct' and cols[UPOS] != 'PUNCT':
        testid = 'rel-upos-punct'
        testmessage = "'punct' must be 'PUNCT' but it is '%s'" % (cols[UPOS])
        warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodeid=id, nodelineno=tree['linenos'][id])
    # PUNCT tokens may only be 'punct' (or 'root', e.g. in fragments).
    if cols[UPOS] == 'PUNCT' and not re.match(r"^(punct|root)", deprel):
        testid = 'upos-rel-punct'
        testmessage = "'PUNCT' must be 'punct' but it is '%s'" % (cols[DEPREL])
        warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodeid=id, nodelineno=tree['linenos'][id])
|
|
|
def validate_left_to_right_relations(id, tree):
    """
    Certain UD relations must always go left-to-right.
    Here we currently check the rule for the basic dependencies.
    The same should also be tested for the enhanced dependencies!
    """
    testlevel = 3
    testclass = 'Syntax'
    cols = tree['nodes'][id]
    if is_multiword_token(cols):
        return
    if DEPREL >= len(cols):
        return
    # Only these relation types (and their subtypes) are constrained.
    if not re.match(r"^(conj|fixed|flat|goeswith|appos)", cols[DEPREL]):
        return
    node_position = int(cols[ID])
    parent_position = int(cols[HEAD])
    # The dependent must follow its parent in the sentence.
    if node_position < parent_position:
        testid = "right-to-left-%s" % lspec2ud(cols[DEPREL])
        testmessage = "Relation '%s' must go left-to-right." % cols[DEPREL]
        warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodeid=id, nodelineno=tree['linenos'][id])
|
|
|
def validate_single_subject(id, tree):
    """
    No predicate should have more than one subject.
    An xcomp dependent normally has no subject, but in some languages the
    requirement may be weaker: it could have an overt subject if it is
    correferential with a particular argument of the matrix verb. Hence we do
    not check zero subjects of xcomp dependents at present.
    Furthermore, in some situations we must allow two subjects (but not three or more).
    If a clause acts as a nonverbal predicate of another clause, and if there is
    no copula, then we must attach two subjects to the predicate of the inner
    clause: one is the predicate of the inner clause, the other is the predicate
    of the outer clause. This could in theory be recursive but in practice it isn't.
    See also issue 34 (https://github.com/UniversalDependencies/tools/issues/34).
    """
    subjects = sorted(
        x for x in tree['children'][id]
        if re.search(r"subj", lspec2ud(tree['nodes'][x][DEPREL]))
    )
    # Up to two subjects are tolerated (see the nonverbal-clausal-predicate
    # exception in the docstring); only three or more trigger the error.
    if len(subjects) <= 2:
        return
    testlevel = 3
    testclass = 'Syntax'
    testid = 'too-many-subjects'
    testmessage = "Node has more than one subject: %s" % str(subjects)
    warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodeid=id, nodelineno=tree['linenos'][id])
|
|
|
def validate_orphan(id, tree):
    """
    The orphan relation is used to attach an unpromoted orphan to the promoted
    orphan in gapping constructions. A common error is that the promoted orphan
    gets the orphan relation too. The parent of orphan is typically attached
    via a conj relation, although some other relations are plausible too.
    """
    if lspec2ud(tree['nodes'][id][DEPREL]) != 'orphan':
        return
    parent_id = int(tree['nodes'][id][HEAD])
    parent_deprel = lspec2ud(tree['nodes'][parent_id][DEPREL])
    # Besides conj, a handful of other incoming relations of the parent are
    # tolerated (root and various clausal relations, plus reparandum).
    if re.match(r"^(conj|parataxis|root|csubj|ccomp|advcl|acl|reparandum)$", parent_deprel):
        return
    testlevel = 3
    testclass = 'Syntax'
    testid = 'orphan-parent'
    testmessage = "The parent of 'orphan' should normally be 'conj' but it is '%s'." % (parent_deprel)
    warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodeid=id, nodelineno=tree['linenos'][id])
|
|
|
def validate_functional_leaves(id, tree):
    """
    Most of the time, function-word nodes should be leaves. This function
    checks for known exceptions and warns in the other cases.

    id ..... integer id of the node whose children are examined
    tree ... tree dictionary as produced by build_tree()
    """
    testlevel = 3
    testclass = 'Syntax'
    # Only function-word relations are subject to the leaf requirement.
    deprel = lspec2ud(tree['nodes'][id][DEPREL])
    if re.match(r"^(case|mark|cc|aux|cop|det|fixed|goeswith|punct)$", deprel):
        idparent = id
        for idchild in tree['children'][id]:
            # Universal label of the parent (the function word being tested).
            pdeprel = lspec2ud(tree['nodes'][idparent][DEPREL])
            # Universal label, UPOS tag and features of the child.
            cdeprel = lspec2ud(tree['nodes'][idchild][DEPREL])
            cupos = tree['nodes'][idchild][UPOS]
            cfeats = tree['nodes'][idchild][FEATS].split('|')
            # Exception: a negative particle/adverb may modify a function word
            # (but not punctuation).
            if pdeprel != 'punct' and cdeprel == 'advmod' and re.match(r"^(PART|ADV)$", cupos) and 'Polarity=Neg' in cfeats:
                continue
            # Exception: a punctuation child is tolerated when the parent is
            # attached nonprojectively (there is a gap under the edge).
            gap = get_gap(idparent, tree)
            if gap and cdeprel == 'punct':
                continue
            # 'mark'/'case' may still carry a few child types (advmod, obl, ...).
            if re.match(r"^(mark|case)$", pdeprel) and not re.match(r"^(advmod|obl|goeswith|fixed|reparandum|conj|cc|punct)$", cdeprel):
                testid = 'leaf-mark-case'
                testmessage = "'%s' not expected to have children (%s:%s:%s --> %s:%s:%s)" % (pdeprel, idparent, tree['nodes'][idparent][FORM], pdeprel, idchild, tree['nodes'][idchild][FORM], cdeprel)
                warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodeid=id, nodelineno=tree['linenos'][idchild])
            # 'aux'/'cop' allow a slightly narrower set of children.
            if re.match(r"^(aux|cop)$", pdeprel) and not re.match(r"^(goeswith|fixed|reparandum|conj|cc|punct)$", cdeprel):
                testid = 'leaf-aux-cop'
                testmessage = "'%s' not expected to have children (%s:%s:%s --> %s:%s:%s)" % (pdeprel, idparent, tree['nodes'][idparent][FORM], pdeprel, idchild, tree['nodes'][idchild][FORM], cdeprel)
                warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodeid=id, nodelineno=tree['linenos'][idchild])
            # Note: the following is an if/elif chain; each child triggers at
            # most one of the cc/fixed/goeswith/punct warnings.
            if re.match(r"^(cc)$", pdeprel) and not re.match(r"^(goeswith|fixed|reparandum|conj|punct)$", cdeprel):
                testid = 'leaf-cc'
                testmessage = "'%s' not expected to have children (%s:%s:%s --> %s:%s:%s)" % (pdeprel, idparent, tree['nodes'][idparent][FORM], pdeprel, idchild, tree['nodes'][idchild][FORM], cdeprel)
                warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodeid=id, nodelineno=tree['linenos'][idchild])
            # 'fixed' may not nest; only a few technical children are allowed.
            elif pdeprel == 'fixed' and not re.match(r"^(goeswith|reparandum|conj|punct)$", cdeprel):
                testid = 'leaf-fixed'
                testmessage = "'%s' not expected to have children (%s:%s:%s --> %s:%s:%s)" % (pdeprel, idparent, tree['nodes'][idparent][FORM], pdeprel, idchild, tree['nodes'][idchild][FORM], cdeprel)
                warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodeid=id, nodelineno=tree['linenos'][idchild])
            # 'goeswith' children must always be leaves.
            elif pdeprel == 'goeswith':
                testid = 'leaf-goeswith'
                testmessage = "'%s' not expected to have children (%s:%s:%s --> %s:%s:%s)" % (pdeprel, idparent, tree['nodes'][idparent][FORM], pdeprel, idchild, tree['nodes'][idchild][FORM], cdeprel)
                warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodeid=id, nodelineno=tree['linenos'][idchild])
            # Punctuation may only have punctuation children.
            elif pdeprel == 'punct' and cdeprel != 'punct':
                testid = 'leaf-punct'
                testmessage = "'%s' not expected to have children (%s:%s:%s --> %s:%s:%s)" % (pdeprel, idparent, tree['nodes'][idparent][FORM], pdeprel, idchild, tree['nodes'][idchild][FORM], cdeprel)
                warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodeid=id, nodelineno=tree['linenos'][idchild])
|
|
|
def collect_ancestors(id, tree, ancestors):
    """
    Usage: ancestors = collect_ancestors(nodeid, nodes, [])
    Walks the HEAD chain from the given node towards the root (id 0),
    appending every ancestor id to the list. A cycle (an ancestor seen twice)
    stops the walk so malformed input cannot cause infinite recursion.
    """
    current = int(id)
    while True:
        parent = int(tree['nodes'][current][HEAD])
        if parent == 0:
            ancestors.append(0)
            return ancestors
        if parent in ancestors:
            # Defective (cyclic) tree; bail out without crashing.
            return ancestors
        ancestors.append(parent)
        current = parent
|
|
|
def get_caused_nonprojectivities(id, tree):
    """
    Checks whether a node is in a gap of a nonprojective edge. Report true only
    if the node's parent is not in the same gap. (We use this function to check
    that a punctuation node does not cause nonprojectivity. But if it has been
    dragged to the gap with a larger subtree, then we do not blame it.)

    Returns the sorted list of ids of the nodes whose parent edge crosses the
    position of node 'id' (i.e., nonprojectivities caused by this node).

    tree ... dictionary:
      nodes ... array of word lines, i.e., lists of columns; mwt and empty nodes are skipped, indices equal to ids (nodes[0] is empty)
      children ... array of sets of children indices (numbers, not strings); indices to this array equal to ids (children[0] are the children of the root)
      linenos ... array of line numbers in the file, corresponding to nodes (needed in error messages)
    """
    iid = int(id)
    # Ancestors of the current node; edges whose parent is an ancestor cannot
    # be considered crossed *because of* this node.
    ancestors = collect_ancestors(iid, tree, [])
    maxid = len(tree['nodes']) - 1
    # Candidate nodes to the left and right of the current node. We do not
    # look beyond the current node's own parent: if the parent is in the same
    # gap, the nonprojectivity is not this node's fault.
    pid = int(tree['nodes'][iid][HEAD])
    if pid < iid:
        left = range(pid + 1, iid)
        right = range(iid + 1, maxid + 1)
    else:
        left = range(1, iid)
        right = range(iid + 1, pid)
    # Exclude candidates attached to an ancestor of the current node,
    # then keep only those whose parent edge actually crosses this node:
    # a left neighbor with its parent to the right of us, or vice versa.
    sancestors = set(ancestors)
    leftna = [x for x in left if int(tree['nodes'][x][HEAD]) not in sancestors]
    rightna = [x for x in right if int(tree['nodes'][x][HEAD]) not in sancestors]
    leftcross = [x for x in leftna if int(tree['nodes'][x][HEAD]) > iid]
    rightcross = [x for x in rightna if int(tree['nodes'][x][HEAD]) < iid]
    # Further restrict the crossing edges so that they do not also cross the
    # current node's own parent edge (those would not be caused by us).
    if pid < iid:
        rightcross = [x for x in rightcross if int(tree['nodes'][x][HEAD]) > pid]
    else:
        leftcross = [x for x in leftcross if int(tree['nodes'][x][HEAD]) < pid]
    return sorted(leftcross + rightcross)
|
|
|
def get_gap(id, tree):
    """
    Return the set of node ids lying between node 'id' and its parent that do
    not belong to the parent's projection, i.e., the gap of a nonprojective
    edge. The set is empty when the edge is projective (or the two nodes are
    adjacent).
    """
    iid = int(id)
    pid = int(tree['nodes'][iid][HEAD])
    lo, hi = (iid, pid) if iid < pid else (pid, iid)
    inbetween = range(lo + 1, hi)
    if not inbetween:
        return set()
    # Nodes in between that are not dominated by the parent form the gap.
    covered = get_projection(pid, tree, set())
    return set(inbetween) - covered
|
|
|
def validate_goeswith_span(id, tree):
    """
    The relation 'goeswith' is used to connect word parts that are separated
    by whitespace and should be one word instead. We assume that the relation
    goes left-to-right, which is checked elsewhere. Here we check that the
    nodes really were separated by whitespace. If there is another node in the
    middle, it must be also attached via 'goeswith'. The parameter id refers to
    the node whose goeswith children we test.
    """
    testlevel = 3
    testclass = 'Syntax'
    goeswith_children = sorted(
        x for x in tree['children'][id]
        if lspec2ud(tree['nodes'][x][DEPREL]) == 'goeswith'
    )
    if not goeswith_children:
        return
    group = sorted([id] + goeswith_children)
    span = list(range(id, int(tree['nodes'][goeswith_children[-1]][ID]) + 1))
    # Every node between the parent and its last goeswith child must belong
    # to the group, i.e., the group must be a contiguous span of ids.
    if group != span:
        testid = 'goeswith-gap'
        testmessage = "Violation of guidelines: gaps in goeswith group %s != %s." % (str(group), str(span))
        warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodeid=id, nodelineno=tree['linenos'][id])
    # All non-final members must be followed by whitespace, i.e., they must
    # not carry SpaceAfter=No in MISC.
    glued = [x for x in group[:-1] if 'SpaceAfter=No' in tree['nodes'][x][MISC].split('|')]
    if glued:
        testid = 'goeswith-nospace'
        testmessage = "'goeswith' cannot connect nodes that are not separated by whitespace"
        warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodeid=id, nodelineno=tree['linenos'][id])
|
|
|
def validate_fixed_span(id, tree):
    """
    Like with goeswith, the fixed relation should not in general skip words that
    are not part of the fixed expression. Unlike goeswith however, there can be
    an intervening punctuation symbol.

    Update 2019-04-13: The rule that fixed expressions cannot be discontiguous
    has been challenged with examples from Swedish and Coptic, see
    https://github.com/UniversalDependencies/docs/issues/623
    For the moment, I am turning this test off. In the future, we should
    distinguish fatal errors from warnings and then this test will perhaps be
    just a warning.
    """
    # Test deliberately disabled (see the docstring above). The code below the
    # return is intentionally unreachable and is kept for when/if the test is
    # reinstated.
    return
    fxchildren = sorted([i for i in tree['children'][id] if lspec2ud(tree['nodes'][i][DEPREL]) == 'fixed'])
    if fxchildren:
        fxlist = sorted([id] + fxchildren)
        fxrange = list(range(id, int(tree['nodes'][fxchildren[-1]][ID]) + 1))
        # Nodes inside the span but outside the expression are acceptable only
        # if they are punctuation.
        fxdiff = set(fxrange) - set(fxlist)
        fxgap = [i for i in fxdiff if lspec2ud(tree['nodes'][i][DEPREL]) != 'punct']
        if fxgap:
            testlevel = 3
            testclass = 'Syntax'
            testid = 'fixed-gap'
            testmessage = "Gaps in fixed expression %s" % str(fxlist)
            warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodeid=id, nodelineno=tree['linenos'][id])
|
|
|
def validate_projective_punctuation(id, tree):
    """
    Punctuation is not supposed to cause nonprojectivity or to be attached
    nonprojectively.
    """
    testlevel = 3
    testclass = 'Syntax'
    if lspec2ud(tree['nodes'][id][DEPREL]) != 'punct':
        return
    # Does this punctuation node sit inside the gap of another edge?
    culprits = get_caused_nonprojectivities(id, tree)
    if culprits:
        testid = 'punct-causes-nonproj'
        testmessage = "Punctuation must not cause non-projectivity of nodes %s" % culprits
        warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodeid=id, nodelineno=tree['linenos'][id])
    # Is the punctuation node itself attached over a gap?
    gap = get_gap(id, tree)
    if gap:
        testid = 'punct-is-nonproj'
        testmessage = "Punctuation must not be attached non-projectively over nodes %s" % sorted(gap)
        warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodeid=id, nodelineno=tree['linenos'][id])
|
|
|
def validate_annotation(tree):
    """
    Checks universally valid consequences of the annotation guidelines.
    Runs every level-3 tree test on every node of the tree.
    """
    # The order of the checks is preserved from the original sequence of calls.
    checks = (
        validate_upos_vs_deprel,
        validate_left_to_right_relations,
        validate_single_subject,
        validate_orphan,
        validate_functional_leaves,
        validate_fixed_span,
        validate_goeswith_span,
        validate_projective_punctuation,
    )
    for node in tree['nodes']:
        node_id = int(node[ID])
        for check in checks:
            check(node_id, tree)
|
|
|
def validate_enhanced_annotation(graph):
    """
    Checks universally valid consequences of the annotation guidelines in the
    enhanced representation. Currently tests only phenomena specific to the
    enhanced dependencies; however, we should also test things that are
    required in the basic dependencies (such as left-to-right coordination),
    unless it is obvious that in enhanced dependencies such things are legal.
    """
    testlevel = 3
    testclass = 'Enhanced'
    # Empty nodes and the enhanced 'orphan' relation are alternative ways of
    # handling gapping, so their co-occurrence is an error. The first sighting
    # of each is remembered in module-level state (NOTE(review): presumably
    # this spans the whole input, not just one sentence — confirm at call site).
    global line_of_first_empty_node
    global line_of_first_enhanced_orphan
    for nid in graph.keys():
        record = graph[nid]
        if is_empty_node(record['cols']):
            if not line_of_first_empty_node:
                line_of_first_empty_node = record['lineno']
            if line_of_first_enhanced_orphan:
                testid = 'empty-node-after-eorphan'
                testmessage = "Empty node means that we address gapping and there should be no orphans in the enhanced graph; but we saw one on line %s" % line_of_first_enhanced_orphan
                warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodeid=nid, nodelineno=record['lineno'])
        udeprels = set([lspec2ud(d) for h, d in record['deps']])
        if 'orphan' in udeprels:
            if not line_of_first_enhanced_orphan:
                line_of_first_enhanced_orphan = record['lineno']
            if line_of_first_empty_node:
                testid = 'eorphan-after-empty-node'
                testmessage = "'orphan' not allowed in enhanced graph because we saw an empty node on line %s" % line_of_first_empty_node
                warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodeid=nid, nodelineno=record['lineno'])
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def validate_whitespace(cols, tag_sets):
    """
    Checks a single line for disallowed whitespace.
    Here we assume that all language-independent whitespace-related tests have
    already been done in validate_cols_level1(), so we only check for words
    with spaces that are explicitly allowed in a given language.
    """
    testlevel = 4
    testclass = 'Format'
    for col_idx in (FORM, LEMMA):
        if col_idx >= len(cols):
            break
        if whitespace_re.match(cols[col_idx]) is None:
            continue
        # The value contains whitespace; it is acceptable only if it matches
        # one of the language-specific exception patterns.
        if any(pattern.fullmatch(cols[col_idx]) for pattern in tag_sets[TOKENSWSPACE]):
            continue
        warn_on_missing_files.add('tokens_w_space')
        testid = 'invalid-word-with-space'
        testmessage = "'%s' in column %s is not on the list of exceptions allowed to contain whitespace (data/tokens_w_space.LANG files)." % (cols[col_idx], COLNAMES[col_idx])
        warn(testmessage, testclass, testlevel=testlevel, testid=testid)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def validate_auxiliary_verbs(cols, children, nodes, line, lang):
    """
    Verifies that the UPOS tag AUX is used only with lemmas that are known to
    act as auxiliary verbs or particles in the given language.
    Parameters:
      'cols' ....... columns of the head node
      'children' ... list of ids
      'nodes' ...... dictionary where we can translate the node id into its
                     CoNLL-U columns
      'line' ....... line number of the node within the file
      'lang' ....... language code used to pick the approved lemma list
    """
    # Lemma '_' means the lemma is unknown, so it cannot be checked.
    if cols[UPOS] == 'AUX' and cols[LEMMA] != '_':
        # Approved auxiliary lemmas per language code.
        auxdict = {
            # Germanic languages.
            'en': ['be', 'have', 'do', 'will', 'would', 'may', 'might', 'can', 'could', 'shall', 'should', 'must', 'get', 'ought'],
            'af': ['is', 'wees', 'het', 'word', 'sal', 'wil', 'mag', 'durf', 'kan', 'moet'],
            'nl': ['zijn', 'hebben', 'worden', 'krijgen', 'kunnen', 'mogen', 'zullen', 'moeten'],
            'de': ['sein', 'haben', 'werden', 'dürfen', 'können', 'mögen', 'wollen', 'sollen', 'müssen'],
            'sv': ['vara', 'ha', 'bli', 'komma', 'få', 'kunna', 'kunde', 'vilja', 'torde', 'behöva', 'böra', 'skola', 'måste', 'må', 'lär', 'do'],
            'no': ['være', 'vere', 'ha', 'verte', 'bli', 'få', 'kunne', 'ville', 'vilje', 'tørre', 'tore', 'burde', 'skulle', 'måtte'],
            'da': ['være', 'have', 'blive', 'kunne', 'ville', 'turde', 'burde', 'skulle', 'måtte'],
            'fo': ['vera', 'hava', 'verða', 'koma', 'fara', 'kunna'],
            'is': ['vera', 'hafa', 'verða', 'geta', 'mega', 'munu', 'skulu', 'eiga'],
            'got': ['wisan'],
            # Romance languages.
            'pt': ['ser', 'estar', 'haver', 'ter', 'andar', 'ir', 'poder', 'dever', 'continuar', 'passar', 'ameaçar',
                   'recomeçar', 'ficar', 'começar', 'voltar', 'parecer', 'acabar', 'deixar', 'vir','chegar', 'costumar', 'quer',
                   'querer','parar','procurar','interpretar','tender', 'viver','permitir','agredir','tornar', 'interpelar'],
            'gl': ['ser', 'estar', 'haber', 'ter', 'ir', 'poder', 'querer', 'deber', 'vir', 'semellar', 'seguir', 'deixar', 'quedar', 'levar', 'acabar'],
            'es': ['ser', 'estar', 'haber', 'tener', 'ir', 'poder', 'saber', 'querer', 'deber'],
            'ca': ['ser', 'estar', 'haver', 'anar', 'poder', 'saber'],
            'fr': ['être', 'avoir', 'faire', 'aller', 'pouvoir', 'savoir', 'vouloir', 'devoir'],
            'it': ['essere', 'stare', 'avere', 'fare', 'andare', 'venire', 'potere', 'sapere', 'volere', 'dovere'],
            'ro': ['fi', 'avea', 'putea', 'ști', 'vrea', 'trebui'],
            'la': ['sum','habeo','fio'],
            # Slavic languages.
            'cs': ['být', 'bývat', 'bývávat'],
            'sk': ['byť', 'bývať', 'by'],
            'hsb': ['być'],
            'pl': ['być', 'bywać', 'by', 'zostać', 'zostawać', 'niech', 'niechby', 'niechże', 'niechaj', 'niechajże', 'to'],
            'uk': ['бути', 'бувати', 'би', 'б'],
            'be': ['быць', 'б'],
            'ru': ['быть', 'бы', 'б'],
            'orv': ['быти', 'не быти', 'бы', 'бъ'],
            'sl': ['biti'],
            'hr': ['biti', 'htjeti'],
            'sr': ['biti', 'hteti'],
            'bg': ['съм', 'бъда', 'бивам', 'би', 'да', 'ще'],
            'cu': ['бꙑти', 'не.бꙑти'],
            # Baltic languages.
            'lt': ['būti'],
            'lv': ['būt', 'kļūt', 'tikt', 'tapt'],
            # Celtic languages.
            'ga': ['is'],
            'gd': ['is'],
            'cy': ['bod', 'yn', 'wedi', 'newydd', 'heb', 'ar', 'y', 'a', 'mi', 'fe', 'am'],
            'br': ['bezañ'],
            # Other Indo-European languages.
            'sq': ['kam', 'jam', 'u'],
            'grc': ['εἰμί'],
            'el': ['είμαι', 'έχω', 'πρέπει', 'θα', 'ας', 'να'],
            'hy': ['եմ', 'լինել', 'տալ', 'պիտի', 'պետք', 'ունեմ', 'կամ'],
            'kmr': ['bûn', 'hebûn'],
            'fa': ['است'],
            # Indic languages.
            'sa': ['अस्', 'as', 'भू', 'bhū', 'इ', 'i', 'कृ', 'kṛ', 'शक्', 'śak'],
            'hi': ['है', 'था', 'रह', 'कर', 'जा', 'सक', 'पा', 'चाहिए', 'हो', 'पड़', 'लग', 'चुक', 'ले', 'दे', 'डाल', 'बैठ', 'उठ', 'रख', 'आ'],
            'ur': ['ہے', 'تھا', 'رہ', 'کر', 'جا', 'سک', 'پا', 'چاہیئے', 'ہو', 'پڑ', 'لگ', 'چک', 'لے', 'دے', 'بیٹھ', 'رکھ', 'آ'],
            'bho': ['हऽ', 'आ', 'स', 'बा', 'छी', 'भा', 'ना', 'गइल', 'रह', 'कर', 'जा', 'सक', 'पा', 'चाही', 'हो', 'पड़', 'लग', 'चुक', 'ले', 'दे', 'मार', 'डाल', 'बैठ', 'उठ', 'रख'],
            'mr': ['असणे', 'नाही', 'नका', 'होणे', 'शकणे', 'लागणे', 'देणे', 'येणे'],
            # Uralic languages.
            'fi': ['olla', 'ei', 'voida', 'pitää', 'saattaa', 'täytyä', 'joutua', 'aikoa', 'taitaa', 'tarvita', 'mahtaa'],
            'krl': ['olla', 'ei', 'voija', 'piteä'],
            'olo': ['olla', 'ei', 'voija', 'pidiä', 'suaha', 'rotie'],
            'et': ['olema', 'ei', 'ära', 'võima', 'pidama', 'saama', 'näima', 'paistma', 'tunduma', 'tohtima'],
            'sme': ['leat'],
            'sms': ['leeʹd', 'haaʹleed', 'ij', 'ni', 'õlggâd', 'urččmõš', 'iʹlla', 'i-ǥõl', 'feʹrttjed', 'pâʹstted'],
            'myv': ['улемс', 'ульнемс', 'оль', 'арась', 'а', 'аволь', 'апак', 'иля', 'эзь', 'савомс', 'савкшномс', 'эрявомс', 'кармамс', 'ли', 'штоли', 'давайте', 'давай', 'бу', 'кадык'],
            'mdf': ['улемс', 'оль', 'ашезь', 'аф', 'афи', 'афоль', 'апак', 'аш', 'эрявомс', 'савомс', 'сашендовомс', 'катк'],
            'kpv': ['лоны', 'лолыны', 'овлывлыны', 'вӧвны', 'вӧвлыны', 'вӧвлывлыны', 'оз', 'абу', 'быть', 'эм'],
            'koi': ['овны', 'овлыны', 'овлывлыны', 'вӧвны', 'бы', 'вермыны', 'ковны', 'позьны', 'оз', 'не', 'эм'],
            'hu': ['van', 'lesz', 'fog', 'volna', 'lehet', 'marad', 'elszenved', 'hoz'],
            # Turkic and Mongolic languages.
            'tr': ['ol', 'i', 'mi', 'değil', 'bil', 'olacak', 'olduk', 'bulun'],
            'kk': ['бол', 'е'],
            'ug': ['بول', 'ئى', 'كەت', 'بەر'],
            'bxr': ['бай', 'боло'],
            # East Asian languages.
            'ko': ['이+라는'],
            'ja': ['だ', 'た', 'ようだ', 'たい', 'いる', 'ない', 'なる', 'する', 'ある', 'おる', 'ます', 'れる', 'られる', 'すぎる', 'める', 'できる', 'しまう', 'せる', 'う', 'いく', '行く', '来る', 'ぬ', 'よう', 'くる', 'くれる', 'てる', 'そう', 'べし', 'くださる', 'もらう', 'みる', 'いただく', 'やすい', 'しれる', '始める', '続ける', 'らしい', 'みたい', 'ちゃう', 'える', 'いい', 'す', 'もらえる', 'させる', 'おく', 'いただける', 'ほしい', 'よい', 'なり', '付ける', 'にくい', '出す', 'いたす', 'っつう', 'きれる', 'でる', '切る', 'たり', 'いける', '易い', 'づらい', 'なさる', 'づける', '難い', '致す', '続く', '渡る', '抜く', '合う', 'がましい', '遅れる', '果てる', 'つくす', 'ごとし', 'がたい', 'ゆく', 'まじ', 'きる', 'や', 'いらっしゃる', 'はじめる', 'つづける', '行ける', '終わる', '終える', '損ねる', '慣れる', '忘れる', '尽くす', 'わたる', 'みたく', 'まいる', 'たがる', 'しめる', 'かかる', 'おける', '込む', '置く', '直す', '回る', '参る', 'らる', 'やる', 'まう', 'まい', 'ぬく', 'とく', 'てく', 'だす', 'じゃ', 'む', 'り', '亘る', 'ず'],
            # Dravidian languages.
            'ta': ['போ', 'மாட்டு', 'படு', 'வை', 'இரு', 'இல்', 'வேண்டு', 'முயல்', 'கொள்', 'விடு', 'உள்', 'வரு', 'முடி', 'வா', 'செய்', 'ஆகு', 'கூடு', 'பெறு', 'தகு', 'வரல்', 'பிடு', 'வீடு', 'என்', 'கூறு', 'கூறு', 'கொடு', 'ஆவர்', 'விரி', 'கிடை', 'அல்'],
            # Caucasian languages.
            'lez': ['x̂ana', "k'an"],
            # Sino-Tibetan languages.
            'lzh': ['爲', '被', '見', '儀', '宜', '須', '可', '得', '能', '足', '敢', '欲', '肯'],
            'zh': ['是', '为', '為'],
            'yue': ['係', '為'],
            'lus': ['nii'],
            'prx': ['in', 'd̪uk'],
            # Austro-Asiatic and Austronesian languages.
            'vi': ['là'],
            'id': ['adalah'],
            'tl': ['may', 'kaya', 'sana', 'huwag'],
            'ifb': ['agguy', 'adi', 'gun', "'ahi"],
            # Australian languages.
            'wbp': ['ka'],
            'zmu': ['yi'],
            # Afro-Asiatic languages.
            'mt': ['kien', 'għad', 'għadx', 'ġa', 'se', 'ħa', 'qed'],
            'ar': [
                'كَان',
                'لَيس',
                'لسنا',
                'هُوَ',
                'سَوفَ',
                'سَ',
                'قَد',
                'رُبَّمَا',
                'عَلَّ',
                'عَاد',
                'مَا',
                'هَل',
                'أَ'
            ],
            'he': ['היה', 'הוא', 'זה'],
            'aii': ['ܗܵܘܹܐ', 'ܟܸܐ', 'ܟܹܐ', 'ܟܲܕ', 'ܒܸܬ', 'ܒܹܬ', 'ܒܸܕ', 'ܒ', 'ܦܵܝܫ', 'ܡܵܨܸܢ', 'ܩܲܡ'],
            'cop': ['ⲟⲩⲛ', 'ⲙⲛ', 'ⲙⲛⲧⲉ', 'ϣⲁⲣⲉ', 'ϣⲁ', 'ⲙⲉⲣⲉ', 'ⲙⲉ', 'ⲁ', 'ⲙⲡⲉ', 'ⲙⲡ', 'ⲛⲉⲣⲉ', 'ⲛⲉ', 'ⲛⲁ', 'ⲛⲧⲉ', 'ⲧⲁⲣⲉ', 'ⲧⲁⲣ', 'ϣⲁⲛⲧⲉ', 'ⲙⲡⲁⲧⲉ', 'ⲛⲧⲉⲣⲉ', 'ⲉⲣϣⲁⲛ', 'ⲉϣ', 'ϣ', 'ϫⲡⲓ', 'ⲛⲉϣ', 'ⲉⲣⲉ', 'ⲛⲛⲉ', 'ⲙⲁⲣⲉ', 'ⲙⲡⲣⲧⲣⲉ'],
            'gqa': ['ə', 'ni'],
            'ha': ['ce', 'ne', 'ta', 'ba'],
            # Niger-Congo and other African languages.
            'laj': ['bèdò', 'bìnò'],
            'mxx': ['à', 'yè'],
            'wo': ['di', 'a', 'da', 'la', 'na', 'bu', 'ngi', 'woon', 'avoir', 'être'],
            'yo': ['jẹ́', 'ní', 'kí', 'kìí', 'ń', 'ti', 'tí', 'yóò', 'máa', 'á', 'a', 'ó', 'yió', 'ìbá', 'ì', 'bá', 'lè', 'gbọdọ̀', 'má', 'máà'],
            'kfz': ['la'],
            'bav': ['lùu'],
            # American languages.
            'mov': ['iðu:m'],
            'gun': ['iko', "nda'ei", "nda'ipoi", 'ĩ'],
            # Creoles.
            'pcm': ['na', 'be', 'bin', 'can', 'cannot', 'con', 'could', 'dey', 'do', 'don', 'fit', 'for', 'gats', 'go', 'have', 'make', 'may', 'might', 'muna', 'must', 'never', 'shall', 'should', 'will', 'would']
        }
        if lang == 'shopen':
            # For the special code 'shopen', accept the union of all languages'
            # lists, plus these four extra lemmas.
            lspecauxs = ['desu', 'kudasai', 'yo', 'sa']
            for ilang in auxdict:
                ilspecauxs = auxdict[ilang]
                lspecauxs = lspecauxs + ilspecauxs
        else:
            lspecauxs = auxdict.get(lang, None)
        if not lspecauxs:
            # No approved list exists for this language at all.
            testlevel = 5
            testclass = 'Morpho'
            testid = 'aux-lemma'
            testmessage = "'%s' is not an auxiliary verb in language [%s] (there are no known approved auxiliaries in this language)" % (cols[LEMMA], lang)
            warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodeid=cols[ID], nodelineno=line)
        elif not cols[LEMMA] in lspecauxs:
            # A list exists but this lemma is not on it.
            testlevel = 5
            testclass = 'Morpho'
            testid = 'aux-lemma'
            testmessage = "'%s' is not an auxiliary verb in language [%s]" % (cols[LEMMA], lang)
            warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodeid=cols[ID], nodelineno=line)
|
|
|
def validate_copula_lemmas(cols, children, nodes, line, lang):
    """
    Verifies that the relation cop is used only with lemmas that are known to
    act as copulas in the given language.
    Parameters:
      'cols' ....... columns of the node whose incoming relation is checked
      'children' ... list of ids (accepted for interface symmetry with the
                     other lspec tests; not used in this check)
      'nodes' ...... dictionary where we can translate the node id into its
                     CoNLL-U columns (not used in this check)
      'line' ....... line number of the node within the file
      'lang' ....... language code selecting the approved copula lemma list
    """
    # Only nodes attached via 'cop' are checked; an underscore lemma means the
    # treebank has no lemma annotation, so there is nothing to compare.
    if cols[DEPREL] == 'cop' and cols[LEMMA] != '_':
        # Approved copula lemmas, keyed by language code.
        copdict = {
            'en': ['be'],
            'af': ['is', 'wees'],
            'nl': ['zijn'],
            'de': ['sein'],
            'sv': ['vara'],
            'no': ['være', 'vere'],
            'da': ['være'],
            'fo': ['vera'],
            'is': ['vera'],
            'got': ['wisan'],
            'pcm': ['be', 'bin', 'can', 'con', 'dey', 'do', 'don', 'gats', 'go', 'must', 'na', 'shall', 'should', 'will' ],
            'pt': ['ser', 'estar'],
            'gl': ['ser', 'estar'],
            'es': ['ser', 'estar'],
            'ca': ['ser', 'estar'],
            'fr': ['être'],
            'it': ['essere'],
            'ro': ['fi'],
            'la': ['sum'],
            'cs': ['být', 'bývat', 'bývávat'],
            'sk': ['byť', 'bývať'],
            'hsb': ['być'],
            'pl': ['być', 'bywać', 'to'],
            'uk': ['бути', 'бувати'],
            'be': ['быць', 'гэта'],
            'ru': ['быть', 'это'],
            'orv': ['быти', 'не быти'],
            'sl': ['biti'],
            'hr': ['biti'],
            'sr': ['biti'],
            'bg': ['съм', 'бъда'],
            'cu': ['бꙑти', 'не.бꙑти'],
            'lt': ['būti'],
            'lv': ['būt', 'kļūt', 'tikt', 'tapt'],
            'ga': ['is'],
            'gd': ['is'],
            'cy': ['bod'],
            'br': ['bezañ'],
            'sq': ['jam'],
            'grc': ['εἰμί'],
            'el': ['είμαι'],
            'hy': ['եմ'],
            'kmr': ['bûn'],
            'fa': ['است'],
            'sa': ['अस्', 'as', 'भू', 'bhū'],
            'hi': ['है', 'था'],
            'ur': ['ہے', 'تھا'],
            'bho': ['हऽ', 'बा', 'भा'],
            'mr': ['असणे'],
            'eu': ['izan', 'egon', 'ukan'],
            'fi': ['olla'],
            'krl': ['olla'],
            'olo': ['olla'],
            'et': ['olema'],
            'sme': ['leat'],
            'sms': ['leeʹd', 'iʹlla'],
            'myv': ['улемс', 'ульнемс', 'оль', 'арась'],
            'mdf': ['улемс', 'оль', 'аш'],
            'kpv': ['лоны', 'лолыны', 'овлывлыны', 'вӧвны', 'вӧвлыны', 'вӧвлывлыны', 'быть', 'эм'],
            'koi': ['овны', 'овлыны', 'овлывлыны', 'вӧвны', 'эм'],
            'hu': ['van'],
            'tr': ['ol', 'i'],
            'kk': ['бол', 'е'],
            'ug': ['بول', 'ئى'],
            'bxr': ['бай', 'боло'],
            'ko': ['이+라는'],
            'ja': ['だ'],
            'ta': ['முயல்'],
            'lez': ['x̂ana'],
            'lzh': ['爲'],
            'zh': ['是', '为', '為'],
            'yue': ['係', '為'],
            'lus': ['nii'],
            'prx': ['in', 'd̪uk'],
            'vi': ['là'],
            'id': ['adalah'],
            'tl': ['may'],
            'zmu': ['yi'],
            'mt': ['kien'],
            'ar': ['كَان', 'لَيس', 'لسنا', 'هُوَ'],
            'he': ['היה', 'הוא', 'זה'],
            'aii': ['ܗܵܘܹܐ'],
            'am': ['ን'],
            'cop': ['ⲡⲉ', 'ⲡ'],
            'ha': ['ce', 'ne'],
            'laj': ['bèdò'],
            'mxx': ['à', 'yè'],
            'wo': ['di', 'la', 'ngi', 'être'],
            'yo': ['jẹ́', 'ní'],
            'kfz': ['la'],
            'bav': ['lùu'],
            'gun': ['iko', "nda'ei", "nda'ipoi", 'ĩ']
        }
        if lang == 'shopen':
            # 'shopen' is not a real language code — presumably a
            # cross-linguistic dataset (TODO confirm). It accepts the union
            # of all approved copulas plus 'desu'.
            lspeccops = ['desu']
            for ilang in copdict:
                ilspeccops = copdict[ilang]
                lspeccops = lspeccops + ilspeccops
        else:
            # None (falsy) means no copulas are approved for this language.
            lspeccops = copdict.get(lang, None)
        if not lspeccops:
            testlevel = 5
            testclass = 'Syntax'
            testid = 'cop-lemma'
            testmessage = "'%s' is not a copula in language [%s] (there are no known approved copulas in this language)" % (cols[LEMMA], lang)
            warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodeid=cols[ID], nodelineno=line)
        elif not cols[LEMMA] in lspeccops:
            testlevel = 5
            testclass = 'Syntax'
            testid = 'cop-lemma'
            testmessage = "'%s' is not a copula in language [%s]" % (cols[LEMMA], lang)
            warn(testmessage, testclass, testlevel=testlevel, testid=testid, nodeid=cols[ID], nodelineno=line)
|
|
|
def validate_lspec_annotation(tree, lang):
    """
    Checks language-specific consequences of the annotation guidelines:
    runs the auxiliary-verb and copula lemma tests on every word of the tree.
    """
    global sentence_line
    # First pass: index the tree. If any node has a malformed ID or HEAD,
    # bail out silently — the format tests have already reported the problem.
    line_by_id = {}
    cols_by_id = {}
    child_ids = {}
    for offset, cols in enumerate(tree):
        if not is_word(cols):
            continue
        if HEAD >= len(cols):
            return
        if cols[HEAD] == '_':
            return
        try:
            int(cols[ID])
            int(cols[HEAD])
        except ValueError:
            return
        # The sentence starts at sentence_line; row offset gives the node's line.
        line_by_id.setdefault(cols[ID], sentence_line + offset)
        cols_by_id.setdefault(cols[ID], cols)
        child_ids.setdefault(cols[HEAD], set()).add(cols[ID])
    # Second pass: run the language-specific tests on each word.
    for cols in tree:
        if not is_word(cols):
            continue
        node_line = line_by_id.get(cols[ID], sentence_line)
        node_children = child_ids.get(cols[ID], [])
        validate_auxiliary_verbs(cols, node_children, cols_by_id, node_line, lang)
        validate_copula_lemmas(cols, node_children, cols_by_id, node_line, lang)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def validate(inp, out, args, tag_sets, known_sent_ids):
    """
    Reads sentences from inp and runs all tree-level tests appropriate for
    the requested validation level on each of them.
    """
    global tree_counter
    for comments, sentence in trees(inp, tag_sets, args):
        tree_counter += 1
        # Level 1: CoNLL-U backbone constraints.
        validate_ID_sequence(sentence)
        validate_token_ranges(sentence)
        if args.level <= 1:
            continue
        # Level 2 and up: UD format constraints.
        validate_sent_id(comments, known_sent_ids, args.lang)
        if args.check_tree_text:
            validate_text_meta(comments, sentence)
        validate_root(sentence)
        validate_ID_references(sentence)
        validate_deps(sentence)
        validate_misc(sentence)
        tree = build_tree(sentence)
        egraph = build_egraph(sentence)
        if tree:
            if args.level > 2:
                validate_annotation(tree)
                if args.level > 4:
                    validate_lspec_annotation(sentence, args.lang)
        else:
            # A corrupt basic tree makes the content tests meaningless.
            warn("Skipping annotation tests because of corrupt tree structure.",
                 'Format', testlevel=2, testid='skipped-corrupt-tree', lineno=False)
        if egraph and args.level > 2:
            validate_enhanced_annotation(egraph)
    validate_newlines(inp)
|
|
|
def load_file(f_name):
    """
    Reads the UTF-8 file f_name and returns the set of its lines, stripped
    of surrounding whitespace, skipping blank lines and '#' comments.
    """
    with io.open(f_name, 'r', encoding='utf-8') as fh:
        stripped = (raw.strip() for raw in fh)
        return {entry for entry in stripped if entry and not entry.startswith('#')}
|
|
|
def load_set(f_name_ud,f_name_langspec,validate_langspec=False,validate_enhanced=False):
    """
    Loads a list of values from the two files, and returns their
    set. If f_name_langspec doesn't exist, loads nothing and returns
    None (ie this taglist is not checked for the given language). If f_name_langspec
    is None, only loads the UD one. This is probably only useful for CPOS which doesn't
    allow language-specific extensions. Set validate_langspec=True when loading basic dependencies.
    That way the language specific deps will be checked to be truly extensions of UD ones.
    Set validate_enhanced=True when loading enhanced dependencies. They will be checked to be
    truly extensions of universal relations, too; but a more relaxed regular expression will
    be checked because enhanced relations may contain stuff that is forbidden in the basic ones.
    """
    res=load_file(os.path.join(THISDIR,"data",f_name_ud))
    # Add the language-specific labels, if a separate file for them exists.
    if f_name_langspec is not None and f_name_langspec!=f_name_ud:
        path_langspec = os.path.join(THISDIR,"data",f_name_langspec)
        if os.path.exists(path_langspec):
            global curr_fname
            # Point error reporting at the data file rather than the corpus.
            curr_fname = path_langspec
            l_spec=load_file(path_langspec)
            for v in l_spec:
                if validate_enhanced:
                    # Enhanced relations are matched against a more relaxed
                    # pattern (edeprel_re) than basic relations.
                    if not edeprel_re.match(v):
                        testlevel = 4
                        testclass = 'Enhanced'
                        testid = 'edeprel-def-regex'
                        testmessage = "Spurious language-specific enhanced relation '%s' - it does not match the regular expression that restricts enhanced relations." % v
                        warn(testmessage, testclass, testlevel=testlevel, testid=testid, lineno=False)
                        continue
                elif validate_langspec:
                    # Basic relations must be lowercase, with at most one
                    # lowercase subtype after a colon.
                    if not re.match(r"^[a-z]+(:[a-z]+)?$", v):
                        testlevel = 4
                        testclass = 'Syntax'
                        testid = 'deprel-def-regex'
                        testmessage = "Spurious language-specific relation '%s' - in basic UD, it must match '^[a-z]+(:[a-z]+)?'." % v
                        warn(testmessage, testclass, testlevel=testlevel, testid=testid, lineno=False)
                        continue
                if validate_langspec or validate_enhanced:
                    # The part before the colon must be a universal relation
                    # (or 'ref', which is allowed in enhanced graphs).
                    try:
                        parts=v.split(':')
                        if parts[0] not in res and parts[0] != 'ref':
                            testlevel = 4
                            testclass = 'Syntax'
                            testid = 'deprel-def-universal-part'
                            testmessage = "Spurious language-specific relation '%s' - not an extension of any UD relation." % v
                            warn(testmessage, testclass, testlevel=testlevel, testid=testid, lineno=False)
                            continue
                    # Was a bare 'except:', which would also swallow
                    # SystemExit and KeyboardInterrupt; narrowed.
                    except Exception:
                        testlevel = 4
                        testclass = 'Syntax'
                        testid = 'deprel-def-universal-part'
                        testmessage = "Spurious language-specific relation '%s' - not an extension of any UD relation." % v
                        warn(testmessage, testclass, testlevel=testlevel, testid=testid, lineno=False)
                        continue
                res.add(v)
    return res
|
|
|
if __name__=="__main__":
    opt_parser = argparse.ArgumentParser(description="CoNLL-U validation script")

    io_group=opt_parser.add_argument_group("Input / output options")
    io_group.add_argument('--quiet', dest="quiet", action="store_true", default=False, help='Do not print any error messages. Exit with 0 on pass, non-zero on fail.')
    io_group.add_argument('--max-err', action="store", type=int, default=20, help='How many errors to output before exiting? 0 for all. Default: %(default)d.')
    io_group.add_argument('input', nargs='*', help='Input file name(s), or "-" or nothing for standard input.')

    list_group=opt_parser.add_argument_group("Tag sets","Options relevant to checking tag sets.")
    # Fixed typo in help text: "langauge" -> "language".
    list_group.add_argument("--lang", action="store", required=True, default=None, help="Which language are we checking? If you specify this (as a two-letter code), the tags will be checked using the language-specific files in the data/ directory of the validator. It's also possible to use 'ud' for checking compliance with purely ud.")

    tree_group=opt_parser.add_argument_group("Tree constraints","Options for checking the validity of the tree.")
    tree_group.add_argument("--level", action="store", type=int, default=5, dest="level", help="Level 1: Test only CoNLL-U backbone. Level 2: UD format. Level 3: UD contents. Level 4: Language-specific labels. Level 5: Language-specific contents.")
    tree_group.add_argument("--multiple-roots", action="store_false", default=True, dest="single_root", help="Allow trees with several root words (single root required by default).")

    meta_group=opt_parser.add_argument_group("Metadata constraints","Options for checking the validity of tree metadata.")
    meta_group.add_argument("--no-tree-text", action="store_false", default=True, dest="check_tree_text", help="Do not test tree text. For internal use only, this test is required and on by default.")
    meta_group.add_argument("--no-space-after", action="store_false", default=True, dest="check_space_after", help="Do not test presence of SpaceAfter=No.")

    args = opt_parser.parse_args()
    error_counter={}
    tree_counter=0

    if args.level < 1:
        print('Option --level must not be less than 1; changing from %d to 1' % args.level, file=sys.stderr)
        args.level = 1
    # Below level 4 there are no language-specific tests, so fall back to the
    # universal label sets.
    if args.level < 4:
        args.lang = 'ud'

    tagsets={XPOS:None,UPOS:None,FEATS:None,DEPREL:None,DEPS:None,TOKENSWSPACE:None}

    if args.lang:
        tagsets[DEPREL]=load_set("deprel.ud","deprel."+args.lang,validate_langspec=True)
        # Enhanced relations extend the basic ones with 'ref' and any
        # language-specific enhanced relations.
        tagsets[DEPS]=tagsets[DEPREL]|{"ref"}|load_set("deprel.ud","edeprel."+args.lang,validate_enhanced=True)
        tagsets[FEATS]=load_set("feat_val.ud","feat_val."+args.lang)
        tagsets[UPOS]=load_set("cpos.ud",None)
        tagsets[TOKENSWSPACE]=load_set("tokens_w_space.ud","tokens_w_space."+args.lang)
        # Tokens with spaces are matched as regular expressions.
        tagsets[TOKENSWSPACE]=[re.compile(regex,re.U) for regex in tagsets[TOKENSWSPACE]]

    out=sys.stdout

    open_files=[]
    try:
        known_sent_ids=set()
        if args.input==[]:
            args.input.append('-')
        for fname in args.input:
            if fname=='-':
                open_files.append(sys.stdin)
            else:
                open_files.append(io.open(fname, 'r', encoding='utf-8'))
        for curr_fname,inp in zip(args.input,open_files):
            validate(inp,out,args,tagsets,known_sent_ids)
    # Was a bare 'except:', which also caught KeyboardInterrupt/SystemExit
    # and reported them as validation failures; narrowed to Exception.
    except Exception:
        warn('Exception caught!', 'Format')
        traceback.print_exc()
    finally:
        # Close the files we opened ourselves (never stdin).
        for f in open_files:
            if f is not sys.stdin:
                f.close()
    if not error_counter:
        if not args.quiet:
            print('*** PASSED ***', file=sys.stderr)
        sys.exit(0)
    else:
        if not args.quiet:
            for k,v in sorted(error_counter.items()):
                print('%s errors: %d' %(k, v), file=sys.stderr)
            print('*** FAILED *** with %d errors'%sum(error_counter.values()), file=sys.stderr)
        for f_name in sorted(warn_on_missing_files):
            filepath = os.path.join(THISDIR, 'data', f_name+'.'+args.lang)
            if not os.path.exists(filepath):
                print('The language-specific file %s does not exist.'%filepath, file=sys.stderr)
        sys.exit(1)
|
|