index (int64: 0–731k) | package (stringlengths 2–98, ⌀) | name (stringlengths 1–76) | docstring (stringlengths 0–281k, ⌀) | code (stringlengths 4–1.07M, ⌀) | signature (stringlengths 2–42.8k, ⌀)
---|---|---|---|---|---
0 | websocket | WebSocket | null | class WebSocket(object):
@classmethod
def is_socket(cls, environ):
if 'upgrade' not in environ.get("HTTP_CONNECTION").lower():
return False
if environ.get("HTTP_UPGRADE") != "WebSocket":
return False
if not environ.get("HTTP_ORIGIN"):
return False
return True
def __init__(self, environ, socket, rfile):
# QQQ should reply Bad Request when IOError is raised above
# should only log the error message, traceback is not necessary
self.origin = environ['HTTP_ORIGIN']
self.protocol = environ.get('HTTP_SEC_WEBSOCKET_PROTOCOL', 'unknown')
self.path_info = environ['PATH_INFO']
self.host = environ['HTTP_HOST']
self.key1 = environ.get('HTTP_SEC_WEBSOCKET_KEY1')
self.key2 = environ.get('HTTP_SEC_WEBSOCKET_KEY2')
self.socket = socket
self.rfile = rfile
self.handshaked = False
def __repr__(self):
try:
info = ' ' + self.socket._formatinfo()
except Exception:
info = ''
return '<%s at %s%s>' % (type(self).__name__, hex(id(self)), info)
def do_handshake(self):
"""This method is called automatically in the first send() or receive()"""
assert not self.handshaked, 'Already did handshake'
if self.key1 is not None:
# version 76
if not self.key1:
message = "Missing HTTP_SEC_WEBSOCKET_KEY1 header in the request"
self._reply_400(message)
raise IOError(message)
if not self.key2:
message = "Missing HTTP_SEC_WEBSOCKET_KEY2 header in the request"
self._reply_400(message)
raise IOError(message)
headers = [
("Upgrade", "WebSocket"),
("Connection", "Upgrade"),
("Sec-WebSocket-Origin", self.origin),
("Sec-WebSocket-Protocol", self.protocol),
("Sec-WebSocket-Location", "ws://" + self.host + self.path_info),
]
self._send_reply("101 Web Socket Protocol Handshake", headers)
challenge = self._get_challenge()
self.socket.sendall(challenge)
else:
# version 75
headers = [
("Upgrade", "WebSocket"),
("Connection", "Upgrade"),
("WebSocket-Origin", self.websocket.origin),
("WebSocket-Protocol", self.websocket.protocol),
("WebSocket-Location", "ws://" + self.host + self.path_info),
]
self._send_reply("101 Web Socket Protocol Handshake", headers)
self.handshaked = True
def _send_reply(self, status, headers, message=None):
self.status = status
self.headers_sent = True
towrite = ['HTTP/1.1 %s\r\n' % self.status]
for header in headers:
towrite.append("%s: %s\r\n" % header)
towrite.append("\r\n")
if message:
towrite.append(message)
self.socket.sendall(''.join(towrite))
def _reply_400(self, message):
self._send_reply('400 Bad Request',
[('Content-Length', str(len(message))),
('Content-Type', 'text/plain')],
message)
self.socket = None
self.rfile = None
def _get_key_value(self, key_value):
key_number = int(re.sub("\\D", "", key_value))
spaces = re.subn(" ", "", key_value)[1]
if key_number % spaces != 0:
self._reply_400('Invalid key')
raise IOError("key_number %r is not an intergral multiple of spaces %r" % (key_number, spaces))
return key_number // spaces  # integer division: struct.pack("!I") needs an int
def _get_challenge(self):
part1 = self._get_key_value(self.key1)
part2 = self._get_key_value(self.key2)
# This request should have 8 bytes of data in the body
key3 = self.rfile.read(8)
challenge = ""
challenge += struct.pack("!I", part1)
challenge += struct.pack("!I", part2)
challenge += key3
return md5(challenge).digest()
def send(self, message):
if not self.handshaked:
self.do_handshake()
if isinstance(message, str):
pass
elif isinstance(message, unicode):
message = message.encode('utf-8')
else:
raise TypeError("Expected string or unicode: %r" % (message, ))
self.socket.sendall("\x00" + message + "\xFF")
def close(self):
# XXX implement graceful close with 0xFF frame
if self.socket is not None:
try:
self.socket.close()
except Exception:
pass
self.socket = None
self.rfile = None
def _message_length(self):
# TODO: build in security against lengths greater than 2**31 or 2**32
length = 0
while True:
byte_str = self.rfile.read(1)
if not byte_str:
return 0
else:
byte = ord(byte_str)
if byte != 0x00:
length = length * 128 + (byte & 0x7f)
if (byte & 0x80) != 0x80:
break
return length
def _read_until(self):
bytes = []
while True:
byte = self.rfile.read(1)
if ord(byte) != 0xff:
bytes.append(byte)
else:
break
return ''.join(bytes)
def receive(self):
if not self.handshaked:
self.do_handshake()
while self.socket is not None:
frame_str = self.rfile.read(1)
if not frame_str:
self.close()
break
else:
frame_type = ord(frame_str)
if (frame_type & 0x80) == 0x00: # most significant byte is not set
if frame_type == 0x00:
bytes = self._read_until()
return bytes.decode("utf-8")
else:
self.close()
elif (frame_type & 0x80) == 0x80: # most significant byte is set
# Read binary data (forward-compatibility)
if frame_type != 0xff:
self.close()
break
else:
length = self._message_length()
if length == 0:
self.close()
break
else:
self.rfile.read(length) # discard the bytes
else:
raise IOError("Received invalid message")
def getsockname(self):
return self.socket.getsockname()
def getpeername(self):
return self.socket.getpeername()
| (environ, socket, rfile) |
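A minimal sketch of the hixie-75/76 text framing that send() and receive() implement above, rewritten in Python 3 bytes for clarity (an assumption — the class itself is Python 2). Each text message travels between a 0x00 start byte and a 0xFF terminator; 0xFF never occurs inside valid UTF-8, so it is safe as an end marker.

def encode_frame(message):
    # UTF-8 payload between the 0x00/0xFF frame markers
    return b"\x00" + message.encode("utf-8") + b"\xff"

def decode_frames(stream):
    # Yield each text payload found between 0x00 and 0xFF markers.
    while stream:
        if stream[0] != 0x00:
            raise IOError("invalid frame start: %r" % stream[0])
        end = stream.index(0xff)           # terminator byte
        yield stream[1:end].decode("utf-8")
        stream = stream[end + 1:]

assert list(decode_frames(encode_frame("hi") + encode_frame("there"))) == ["hi", "there"]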
1 | websocket | __init__ | null | def __init__(self, environ, socket, rfile):
# QQQ should reply Bad Request when IOError is raised above
# should only log the error message, traceback is not necessary
self.origin = environ['HTTP_ORIGIN']
self.protocol = environ.get('HTTP_SEC_WEBSOCKET_PROTOCOL', 'unknown')
self.path_info = environ['PATH_INFO']
self.host = environ['HTTP_HOST']
self.key1 = environ.get('HTTP_SEC_WEBSOCKET_KEY1')
self.key2 = environ.get('HTTP_SEC_WEBSOCKET_KEY2')
self.socket = socket
self.rfile = rfile
self.handshaked = False
| (self, environ, socket, rfile) |
2 | websocket | __repr__ | null | def __repr__(self):
try:
info = ' ' + self.socket._formatinfo()
except Exception:
info = ''
return '<%s at %s%s>' % (type(self).__name__, hex(id(self)), info)
| (self) |
3 | websocket | _get_challenge | null | def _get_challenge(self):
part1 = self._get_key_value(self.key1)
part2 = self._get_key_value(self.key2)
# This request should have 8 bytes of data in the body
key3 = self.rfile.read(8)
challenge = ""
challenge += struct.pack("!I", part1)
challenge += struct.pack("!I", part2)
challenge += key3
return md5(challenge).digest()
| (self) |
4 | websocket | _get_key_value | null | def _get_key_value(self, key_value):
key_number = int(re.sub("\\D", "", key_value))
spaces = re.subn(" ", "", key_value)[1]
if key_number % spaces != 0:
self._reply_400('Invalid key')
raise IOError("key_number %r is not an intergral multiple of spaces %r" % (key_number, spaces))
return key_number // spaces  # integer division: struct.pack("!I") needs an int
| (self, key_value) |
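The draft-76 key arithmetic above is easy to check by hand. A worked sketch with made-up header values (not the spec's own test vectors): the digits are concatenated, divided by the space count, and the two 32-bit results plus the 8-byte body are md5-hashed.

import re
import struct
from hashlib import md5

def key_value(key_header):
    number = int(re.sub(r"\D", "", key_header))  # concatenate the digits
    spaces = key_header.count(" ")
    assert number % spaces == 0                  # required by the protocol
    return number // spaces

key1 = "12 3 45 6"     # digits 123456, 3 spaces  -> 41152
key2 = "9 8 7 6 5 40"  # digits 9876540, 5 spaces -> 1975308
key3 = b"12345678"     # the 8 bytes read from the request body

challenge = struct.pack("!I", key_value(key1)) + struct.pack("!I", key_value(key2)) + key3
print(md5(challenge).hexdigest())  # the 16-byte digest is echoed back to the client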
5 | websocket | _message_length | null | def _message_length(self):
# TODO: buildin security agains lengths greater than 2**31 or 2**32
length = 0
while True:
byte_str = self.rfile.read(1)
if not byte_str:
return 0
else:
byte = ord(byte_str)
if byte != 0x00:
length = length * 128 + (byte & 0x7f)
if (byte & 0x80) != 0x80:
break
return length
| (self) |
6 | websocket | _read_until | null | def _read_until(self):
bytes = []
while True:
byte = self.rfile.read(1)
if ord(byte) != 0xff:
bytes.append(byte)
else:
break
return ''.join(bytes)
| (self) |
7 | websocket | _reply_400 | null | def _reply_400(self, message):
self._send_reply('400 Bad Request',
[('Content-Length', str(len(message))),
('Content-Type', 'text/plain')],
message)
self.socket = None
self.rfile = None
| (self, message) |
8 | websocket | _send_reply | null | def _send_reply(self, status, headers, message=None):
self.status = status
self.headers_sent = True
towrite = ['HTTP/1.1 %s\r\n' % self.status]
for header in headers:
towrite.append("%s: %s\r\n" % header)
towrite.append("\r\n")
if message:
towrite.append(message)
self.socket.sendall(''.join(towrite))
| (self, status, headers, message=None) |
9 | websocket | close | null | def close(self):
# XXX implement graceful close with 0xFF frame
if self.socket is not None:
try:
self.socket.close()
except Exception:
pass
self.socket = None
self.rfile = None
| (self) |
10 | websocket | do_handshake | This method is called automatically in the first send() or receive() | def do_handshake(self):
"""This method is called automatically in the first send() or receive()"""
assert not self.handshaked, 'Already did handshake'
if self.key1 is not None:
# version 76
if not self.key1:
message = "Missing HTTP_SEC_WEBSOCKET_KEY1 header in the request"
self._reply_400(message)
raise IOError(message)
if not self.key2:
message = "Missing HTTP_SEC_WEBSOCKET_KEY2 header in the request"
self._reply_400(message)
raise IOError(message)
headers = [
("Upgrade", "WebSocket"),
("Connection", "Upgrade"),
("Sec-WebSocket-Origin", self.origin),
("Sec-WebSocket-Protocol", self.protocol),
("Sec-WebSocket-Location", "ws://" + self.host + self.path_info),
]
self._send_reply("101 Web Socket Protocol Handshake", headers)
challenge = self._get_challenge()
self.socket.sendall(challenge)
else:
# version 75
headers = [
("Upgrade", "WebSocket"),
("Connection", "Upgrade"),
("WebSocket-Origin", self.websocket.origin),
("WebSocket-Protocol", self.websocket.protocol),
("WebSocket-Location", "ws://" + self.host + self.path_info),
]
self._send_reply("101 Web Socket Protocol Handshake", headers)
self.handshaked = True
| (self) |
11 | websocket | getpeername | null | def getpeername(self):
return self.socket.getpeername()
| (self) |
12 | websocket | getsockname | null | def getsockname(self):
return self.socket.getsockname()
| (self) |
13 | websocket | receive | null | def receive(self):
if not self.handshaked:
self.do_handshake()
while self.socket is not None:
frame_str = self.rfile.read(1)
if not frame_str:
self.close()
break
else:
frame_type = ord(frame_str)
if (frame_type & 0x80) == 0x00: # most significant byte is not set
if frame_type == 0x00:
bytes = self._read_until()
return bytes.decode("utf-8")
else:
self.close()
elif (frame_type & 0x80) == 0x80: # most significant byte is set
# Read binary data (forward-compatibility)
if frame_type != 0xff:
self.close()
break
else:
length = self._message_length()
if length == 0:
self.close()
break
else:
self.rfile.read(length) # discard the bytes
else:
raise IOError("Received invalid message")
| (self) |
14 | websocket | send | null | def send(self, message):
if not self.handshaked:
self.do_handshake()
if isinstance(message, str):
pass
elif isinstance(message, unicode):
message = message.encode('utf-8')
else:
raise TypeError("Expected string or unicode: %r" % (message, ))
self.socket.sendall("\x00" + message + "\xFF")
| (self, message) |
15 | builtins | str | str(object='') -> str
str(bytes_or_buffer[, encoding[, errors]]) -> str
Create a new string object from the given object. If encoding or
errors is specified, then the object must expose a data buffer
that will be decoded using the given encoding and error handler.
Otherwise, returns the result of object.__str__() (if defined)
or repr(object).
encoding defaults to sys.getdefaultencoding().
errors defaults to 'strict'. | from builtins import str
| null |
17 | moscow_yandex_transport | YandexMapsRequester | null | class YandexMapsRequester(object):
def __init__(self, user_agent: str = None):
"""
:param user_agent: optional User-Agent header to use for requests
"""
self._config = CONFIG
if user_agent is not None:
CONFIG['headers']['User-Agent'] = user_agent
self.set_new_session()
def get_stop_info(self, stop_id):
""""
get transport data for stop_id in json
"""
self._config["params"]["id"] = f"stop__{stop_id}"
req = requests.get(self._config["uri"], params=self._config["params"], cookies=self._config["cookies"],
headers=self._config["headers"])
return loads(req.content.decode('utf8'))
def set_new_session(self):
"""
Create new http session to Yandex, with valid csrf_token and session_id
"""
ya_request = requests.get(url=self._config["init_url"], headers=self._config["headers"])
reply = ya_request.content.decode('utf8')
self._config["params"][CSRF_TOKEN_KEY] = re.search(f'"{CSRF_TOKEN_KEY}":"(\w+.\w+)"', reply).group(1)
self._config["cookies"] = dict(ya_request.cookies)
self._config["params"][SESSION_KEY] = re.search(f'"{SESSION_KEY}":"(\d+.\d+)"', reply).group(1)
| (user_agent: str = None) |
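A hedged usage sketch for YandexMapsRequester; the stop id is a placeholder and the payload layout depends on the Yandex endpoint configured in CONFIG.

requester = YandexMapsRequester(user_agent="my-dashboard/1.0")
data = requester.get_stop_info("1234567890")  # hypothetical stop id
print(type(data))                             # decoded JSON payload (a dict)
requester.set_new_session()                   # refresh csrf_token/session_id if they expire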
18 | moscow_yandex_transport | __init__ |
:param user_agent: optional User-Agent header to use for requests
| def __init__(self, user_agent: str = None):
"""
:param user_agent: optional User-Agent header to use for requests
"""
self._config = CONFIG
if user_agent is not None:
CONFIG['headers']['User-Agent'] = user_agent
self.set_new_session()
| (self, user_agent: Optional[str] = None) |
19 | moscow_yandex_transport | get_stop_info | "
get transport data for stop_id in json
| def get_stop_info(self, stop_id):
""""
get transport data for stop_id in json
"""
self._config["params"]["id"] = f"stop__{stop_id}"
req = requests.get(self._config["uri"], params=self._config["params"], cookies=self._config["cookies"],
headers=self._config["headers"])
return loads(req.content.decode('utf8'))
| (self, stop_id) |
20 | moscow_yandex_transport | set_new_session |
Create new http session to Yandex, with valid csrf_token and session_id
| def set_new_session(self):
"""
Create new http session to Yandex, with valid csrf_token and session_id
"""
ya_request = requests.get(url=self._config["init_url"], headers=self._config["headers"])
reply = ya_request.content.decode('utf8')
self._config["params"][CSRF_TOKEN_KEY] = re.search(f'"{CSRF_TOKEN_KEY}":"(\w+.\w+)"', reply).group(1)
self._config["cookies"] = dict(ya_request.cookies)
self._config["params"][SESSION_KEY] = re.search(f'"{SESSION_KEY}":"(\d+.\d+)"', reply).group(1)
| (self) |
21 | json | loads | Deserialize ``s`` (a ``str``, ``bytes`` or ``bytearray`` instance
containing a JSON document) to a Python object.
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
``object_pairs_hook`` is an optional function that will be called with the
result of any object literal decoded with an ordered list of pairs. The
return value of ``object_pairs_hook`` will be used instead of the ``dict``.
This feature can be used to implement custom decoders. If ``object_hook``
is also defined, the ``object_pairs_hook`` takes priority.
``parse_float``, if specified, will be called with the string
of every JSON float to be decoded. By default this is equivalent to
float(num_str). This can be used to use another datatype or parser
for JSON floats (e.g. decimal.Decimal).
``parse_int``, if specified, will be called with the string
of every JSON int to be decoded. By default this is equivalent to
int(num_str). This can be used to use another datatype or parser
for JSON integers (e.g. float).
``parse_constant``, if specified, will be called with one of the
following strings: -Infinity, Infinity, NaN.
This can be used to raise an exception if invalid JSON numbers
are encountered.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg; otherwise ``JSONDecoder`` is used.
| def loads(s, *, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, object_pairs_hook=None, **kw):
"""Deserialize ``s`` (a ``str``, ``bytes`` or ``bytearray`` instance
containing a JSON document) to a Python object.
``object_hook`` is an optional function that will be called with the
result of any object literal decode (a ``dict``). The return value of
``object_hook`` will be used instead of the ``dict``. This feature
can be used to implement custom decoders (e.g. JSON-RPC class hinting).
``object_pairs_hook`` is an optional function that will be called with the
result of any object literal decoded with an ordered list of pairs. The
return value of ``object_pairs_hook`` will be used instead of the ``dict``.
This feature can be used to implement custom decoders. If ``object_hook``
is also defined, the ``object_pairs_hook`` takes priority.
``parse_float``, if specified, will be called with the string
of every JSON float to be decoded. By default this is equivalent to
float(num_str). This can be used to use another datatype or parser
for JSON floats (e.g. decimal.Decimal).
``parse_int``, if specified, will be called with the string
of every JSON int to be decoded. By default this is equivalent to
int(num_str). This can be used to use another datatype or parser
for JSON integers (e.g. float).
``parse_constant``, if specified, will be called with one of the
following strings: -Infinity, Infinity, NaN.
This can be used to raise an exception if invalid JSON numbers
are encountered.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg; otherwise ``JSONDecoder`` is used.
"""
if isinstance(s, str):
if s.startswith('\ufeff'):
raise JSONDecodeError("Unexpected UTF-8 BOM (decode using utf-8-sig)",
s, 0)
else:
if not isinstance(s, (bytes, bytearray)):
raise TypeError(f'the JSON object must be str, bytes or bytearray, '
f'not {s.__class__.__name__}')
s = s.decode(detect_encoding(s), 'surrogatepass')
if (cls is None and object_hook is None and
parse_int is None and parse_float is None and
parse_constant is None and object_pairs_hook is None and not kw):
return _default_decoder.decode(s)
if cls is None:
cls = JSONDecoder
if object_hook is not None:
kw['object_hook'] = object_hook
if object_pairs_hook is not None:
kw['object_pairs_hook'] = object_pairs_hook
if parse_float is not None:
kw['parse_float'] = parse_float
if parse_int is not None:
kw['parse_int'] = parse_int
if parse_constant is not None:
kw['parse_constant'] = parse_constant
return cls(**kw).decode(s)
| (s, *, cls=None, object_hook=None, parse_float=None, parse_int=None, parse_constant=None, object_pairs_hook=None, **kw) |
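Short demonstrations of the hooks described in the docstring above, all plain standard-library usage:

import json
from decimal import Decimal
from collections import OrderedDict

doc = '{"price": 19.99, "qty": 3}'

# parse_float: route every JSON float through Decimal instead of float
print(json.loads(doc, parse_float=Decimal))           # {'price': Decimal('19.99'), 'qty': 3}

# object_pairs_hook: receive each object's key/value pairs in document order
print(json.loads(doc, object_pairs_hook=OrderedDict))

# object_hook: post-process every decoded dict
print(json.loads(doc, object_hook=lambda d: {k.upper(): v for k, v in d.items()}))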
26 | pycookiecheat.chrome | chrome_cookies | Retrieve cookies from Chrome/Chromium on OSX or Linux.
Args:
url: Domain from which to retrieve cookies, starting with http(s)
cookie_file: Path to alternate file to search for cookies
browser: Name of the browser's cookies to read ('Chrome' or 'Chromium')
curl_cookie_file: Path to save the cookie file to be used with cURL
password: Optional system password
Returns:
Dictionary of cookie values for URL
| def chrome_cookies(
url: str,
cookie_file: t.Optional[str] = None,
browser: str = "Chrome",
curl_cookie_file: t.Optional[str] = None,
password: t.Optional[t.Union[bytes, str]] = None,
) -> dict:
"""Retrieve cookies from Chrome/Chromium on OSX or Linux.
Args:
url: Domain from which to retrieve cookies, starting with http(s)
cookie_file: Path to alternate file to search for cookies
browser: Name of the browser's cookies to read ('Chrome' or 'Chromium')
curl_cookie_file: Path to save the cookie file to be used with cURL
password: Optional system password
Returns:
Dictionary of cookie values for URL
"""
parsed_url = urllib.parse.urlparse(url)
if parsed_url.scheme:
domain = parsed_url.netloc
else:
raise urllib.error.URLError("You must include a scheme with your URL.")
# If running Chrome on OSX
if sys.platform == "darwin":
config = get_osx_config(browser)
elif sys.platform.startswith("linux"):
config = get_linux_config(browser)
else:
raise OSError("This script only works on OSX or Linux.")
config.update(
{"init_vector": b" " * 16, "length": 16, "salt": b"saltysalt"}
)
if cookie_file:
cookie_file = str(pathlib.Path(cookie_file).expanduser())
else:
cookie_file = str(pathlib.Path(config["cookie_file"]).expanduser())
if isinstance(password, bytes):
config["my_pass"] = password
elif isinstance(password, str):
config["my_pass"] = password.encode("utf8")
elif isinstance(config["my_pass"], str):
config["my_pass"] = config["my_pass"].encode("utf8")
kdf = PBKDF2HMAC(
algorithm=SHA1(),
iterations=config["iterations"],
length=config["length"],
salt=config["salt"],
)
enc_key = kdf.derive(config["my_pass"])
try:
conn = sqlite3.connect("file:{}?mode=ro".format(cookie_file), uri=True)
except sqlite3.OperationalError:
print("Unable to connect to cookie_file at: {}\n".format(cookie_file))
raise
conn.row_factory = sqlite3.Row
# Check whether the column name is `secure` or `is_secure`
secure_column_name = "is_secure"
for (
sl_no,
column_name,
data_type,
is_null,
default_val,
pk,
) in conn.execute("PRAGMA table_info(cookies)"):
if column_name == "secure":
secure_column_name = "secure AS is_secure"
break
sql = (
"select host_key, path, "
+ secure_column_name
+ ", expires_utc, name, value, encrypted_value "
"from cookies where host_key like ?"
)
cookies: list[Cookie] = []
for host_key in generate_host_keys(domain):
for db_row in conn.execute(sql, (host_key,)):
# Decrypt only when there is no plaintext value and the encrypted
# value carries the 'v10'/'v11' version prefix.
row = dict(db_row)
if not row["value"] and (
row["encrypted_value"][:3] in {b"v10", b"v11"}
):
row["value"] = chrome_decrypt(
row["encrypted_value"],
key=enc_key,
init_vector=config["init_vector"],
)
del row["encrypted_value"]
cookies.append(Cookie(**row))
conn.rollback()
if curl_cookie_file:
with open(curl_cookie_file, "w") as text_file:
for c in cookies:
print(c.as_cookie_file_line(), file=text_file)
return {c.name: c.value for c in cookies}
| (url: str, cookie_file: Optional[str] = None, browser: str = 'Chrome', curl_cookie_file: Optional[str] = None, password: Union[bytes, str, NoneType] = None) -> dict |
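A minimal usage sketch for chrome_cookies(); the URL and paths are examples.

from pycookiecheat import chrome_cookies

cookies = chrome_cookies("https://example.com")  # default Chrome profile
# The plain dict plugs straight into e.g. requests:
#   requests.get("https://example.com", cookies=cookies)

cookies = chrome_cookies(
    "https://example.com",
    browser="Chromium",                   # read Chromium's store instead
    curl_cookie_file="/tmp/cookies.txt",  # also write a cURL-compatible jar
)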
29 | pycookiecheat.firefox | firefox_cookies | Retrieve cookies from Firefox on OSX, Linux, or Windows.
Args:
url: Domain from which to retrieve cookies, starting with http(s)
profile_name: Name (or glob pattern) of the Firefox profile to search
for cookies -- if none given it will find the configured
default profile
browser: Name of the browser's cookies to read (must be 'Firefox')
curl_cookie_file: Path to save the cookie file to be used with cURL
Returns:
Dictionary of cookie values for URL
| def firefox_cookies(
url: str,
profile_name: Optional[str] = None,
browser: str = "Firefox",
curl_cookie_file: Optional[str] = None,
) -> Dict[str, str]:
"""Retrieve cookies from Chrome/Chromium on OSX or Linux.
Args:
url: Domain from which to retrieve cookies, starting with http(s)
profile_name: Name (or glob pattern) of the Firefox profile to search
for cookies -- if none given it will find the configured
default profile
browser: Name of the browser's cookies to read (must be 'Firefox')
curl_cookie_file: Path to save the cookie file to be used with cURL
Returns:
Dictionary of cookie values for URL
"""
parsed_url = urllib.parse.urlparse(url)
if parsed_url.scheme:
domain = parsed_url.netloc
else:
raise urllib.error.URLError("You must include a scheme with your URL.")
if sys.platform.startswith("linux"):
os = "linux"
elif sys.platform == "darwin":
os = "osx"
elif sys.platform == "win32":
os = "windows"
else:
raise OSError(
"This script only works on "
+ ", ".join(FIREFOX_OS_PROFILE_DIRS.keys())
)
profiles_dir = _get_profiles_dir_for_os(os, browser)
cookies: list[Cookie] = []
with tempfile.TemporaryDirectory() as tmp_dir:
db_file = _load_firefox_cookie_db(
profiles_dir, Path(tmp_dir), profile_name
)
for host_key in generate_host_keys(domain):
with sqlite3.connect(db_file) as con:
con.row_factory = sqlite3.Row
res = con.execute(FIREFOX_COOKIE_SELECT_SQL, (host_key,))
for row in res.fetchall():
cookies.append(Cookie(**row))
if curl_cookie_file:
with open(curl_cookie_file, "w") as text_file:
for c in cookies:
print(c.as_cookie_file_line(), file=text_file)
return {c.name: c.value for c in cookies}
| (url: str, profile_name: Optional[str] = None, browser: str = 'Firefox', curl_cookie_file: Optional[str] = None) -> Dict[str, str] |
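And likewise for firefox_cookies(); the profile glob below is an assumption about the local profile name.

from pycookiecheat import firefox_cookies

cookies = firefox_cookies("https://example.com")  # configured default profile
cookies = firefox_cookies("https://example.com", profile_name="*.default-release")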
30 | arpeggio | And |
This predicate will succeed if the specified expression matches current
input.
| class And(SyntaxPredicate):
"""
This predicate will succeed if the specified expression matches current
input.
"""
def _parse(self, parser):
c_pos = parser.position
for e in self.nodes:
try:
e.parse(parser)
except NoMatch:
parser.position = c_pos
raise
parser.position = c_pos
| (*elements, **kwargs) |
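A small grammar sketch showing the And predicate in arpeggio: the lookahead checks that a number follows without consuming input, then the sequence matches it for real.

from arpeggio import ParserPython, And, EOF
from arpeggio import RegExMatch as _

def number():   return _(r'\d+')
def px_value(): return And(number), number, "px"  # zero-width lookahead, then the match
def length():   return px_value, EOF

parser = ParserPython(length)
print(parser.parse("42px"))  # succeeds; And restored the position it peeked from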
31 | arpeggio | __init__ | null | def __init__(self, *elements, **kwargs):
if len(elements) == 1:
elements = elements[0]
self.elements = elements
self.rule_name = kwargs.get('rule_name', '')
self.root = kwargs.get('root', False)
nodes = kwargs.get('nodes', [])
if not hasattr(nodes, '__iter__'):
nodes = [nodes]
self.nodes = nodes
if 'suppress' in kwargs:
self.suppress = kwargs['suppress']
# Memoization. Every node cache the parsing results for the given input
# positions.
self._result_cache = {} # position -> parse tree at the position
| (self, *elements, **kwargs) |
32 | arpeggio | _clear_cache |
Clears memoization cache. Should be called on input change and end
of parsing.
Args:
processed (set): Set of processed nodes to prevent infinite loops.
| def _clear_cache(self, processed=None):
"""
Clears memoization cache. Should be called on input change and end
of parsing.
Args:
processed (set): Set of processed nodes to prevent infinite loops.
"""
self._result_cache = {}
if not processed:
processed = set()
for node in self.nodes:
if node not in processed:
processed.add(node)
node._clear_cache(processed)
| (self, processed=None) |
33 | arpeggio | _parse | null | def _parse(self, parser):
c_pos = parser.position
for e in self.nodes:
try:
e.parse(parser)
except NoMatch:
parser.position = c_pos
raise
parser.position = c_pos
| (self, parser) |
34 | arpeggio | parse | null | def parse(self, parser):
if parser.debug:
name = self.name
if name.startswith('__asgn'):
name = "{}[{}]".format(self.name, self._attr_name)
parser.dprint(">> Matching rule {}{} at position {} => {}"
.format(name,
" in {}".format(parser.in_rule)
if parser.in_rule else "",
parser.position,
parser.context()), 1)
# Current position could change in recursive calls
# so save it.
c_pos = parser.position
# Memoization.
# If this position is already parsed by this parser expression use
# the result
if parser.memoization:
try:
result, new_pos = self._result_cache[c_pos]
parser.position = new_pos
parser.cache_hits += 1
if parser.debug:
parser.dprint(
"** Cache hit for [{}, {}] = '{}' : new_pos={}"
.format(name, c_pos, text(result), text(new_pos)))
parser.dprint(
"<<+ Matched rule {} at position {}"
.format(name, new_pos), -1)
# If NoMatch is recorded at this position raise.
if result is NOMATCH_MARKER:
raise parser.nm
# else return cached result
return result
except KeyError:
parser.cache_misses += 1
# Remember last parsing expression and set this as
# the new last.
last_pexpression = parser.last_pexpression
parser.last_pexpression = self
if self.rule_name:
# If we are entering root rule
# remember previous root rule name and set
# this one on the parser to be available for
# debugging messages
previous_root_rule_name = parser.in_rule
parser.in_rule = self.rule_name
try:
result = self._parse(parser)
if self.suppress or (type(result) is list and
result and result[0] is None):
result = None
except NoMatch:
parser.position = c_pos # Backtracking
# Memoize NoMatch at this position for this rule
if parser.memoization:
self._result_cache[c_pos] = (NOMATCH_MARKER, c_pos)
raise
finally:
# Recover last parsing expression.
parser.last_pexpression = last_pexpression
if parser.debug:
parser.dprint("<<{} rule {}{} at position {} => {}"
.format("- Not matched"
if parser.position is c_pos
else "+ Matched",
name,
" in {}".format(parser.in_rule)
if parser.in_rule else "",
parser.position,
parser.context()), -1)
# If leaving root rule restore previous root rule name.
if self.rule_name:
parser.in_rule = previous_root_rule_name
# For root rules flatten non-terminal/list
if self.root and result and not isinstance(result, Terminal):
if not isinstance(result, NonTerminal):
result = flatten(result)
# Tree reduction will eliminate Non-terminal with single child.
if parser.reduce_tree and len(result) == 1:
result = result[0]
# If the result is not parse tree node it must be a plain list
# so create a new NonTerminal.
if not isinstance(result, ParseTreeNode):
result = NonTerminal(self, result)
# Result caching for use by memoization.
if parser.memoization:
self._result_cache[c_pos] = (result, parser.position)
return result
| (self, parser) |
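The memoization in parse() above is the packrat pattern: cache (result, end position) per start position, and record failures so they can be replayed. A stripped-down, framework-free sketch of the same idea (NoMatchError, MemoizedExpr and _do_parse are illustrative names, not arpeggio API):

class NoMatchError(Exception):
    pass

NOMATCH = object()  # sentinel standing in for NOMATCH_MARKER

class MemoizedExpr:
    def __init__(self):
        self._cache = {}  # start position -> (result, end position)

    def parse(self, parser):
        pos = parser.position
        if pos in self._cache:
            result, end = self._cache[pos]
            if result is NOMATCH:      # replay the recorded failure
                raise NoMatchError()
            parser.position = end      # fast-forward past the cached match
            return result
        try:
            result = self._do_parse(parser)  # subclass hook, as in _parse()
        except NoMatchError:
            parser.position = pos            # backtrack
            self._cache[pos] = (NOMATCH, pos)
            raise
        self._cache[pos] = (result, parser.position)
        return result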
35 | arpeggio | ArpeggioError |
Base class for arpeggio errors.
| class ArpeggioError(Exception):
"""
Base class for arpeggio errors.
"""
def __init__(self, message):
self.message = message
def __str__(self):
return repr(self.message)
| (message) |
36 | arpeggio | __init__ | null | def __init__(self, message):
self.message = message
| (self, message) |
37 | arpeggio | __str__ | null | def __str__(self):
return repr(self.message)
| (self) |
38 | arpeggio | Combine |
This decorator defines a pexpression that represents a lexeme rule.
This rule will always return a Terminal parse tree node.
Whitespace will be preserved. Comments will not be matched.
| class Combine(Decorator):
"""
This decorator defines a pexpression that represents a lexeme rule.
This rule will always return a Terminal parse tree node.
Whitespace will be preserved. Comments will not be matched.
"""
def _parse(self, parser):
results = []
oldin_lex_rule = parser.in_lex_rule
parser.in_lex_rule = True
c_pos = parser.position
try:
for parser_model_node in self.nodes:
results.append(parser_model_node.parse(parser))
results = flatten(results)
# Create terminal from result
return Terminal(self, c_pos,
"".join([x.flat_str() for x in results]))
except NoMatch:
parser.position = c_pos # Backtracking
raise
finally:
parser.in_lex_rule = oldin_lex_rule
| (*elements, **kwargs) |
41 | arpeggio | _parse | null | def _parse(self, parser):
results = []
oldin_lex_rule = parser.in_lex_rule
parser.in_lex_rule = True
c_pos = parser.position
try:
for parser_model_node in self.nodes:
results.append(parser_model_node.parse(parser))
results = flatten(results)
# Create terminal from result
return Terminal(self, c_pos,
"".join([x.flat_str() for x in results]))
except NoMatch:
parser.position = c_pos # Backtracking
raise
finally:
parser.in_lex_rule = oldin_lex_rule
| (self, parser) |
43 | arpeggio | CrossRef |
Used for rule reference resolving.
| class CrossRef(object):
"""
Used for rule reference resolving.
"""
def __init__(self, target_rule_name, position=-1):
self.target_rule_name = target_rule_name
self.position = position
| (target_rule_name, position=-1) |
44 | arpeggio | __init__ | null | def __init__(self, target_rule_name, position=-1):
self.target_rule_name = target_rule_name
self.position = position
| (self, target_rule_name, position=-1) |
45 | arpeggio | DebugPrinter |
Mixin class for adding debug print support.
Attributes:
debug (bool): If true debugging messages will be printed.
_current_indent(int): Current indentation level for prints.
| class DebugPrinter(object):
"""
Mixin class for adding debug print support.
Attributes:
debug (bool): If true debugging messages will be printed.
_current_indent(int): Current indentation level for prints.
"""
def __init__(self, **kwargs):
self.debug = kwargs.pop("debug", False)
self.file = kwargs.pop("file", sys.stdout)
self._current_indent = 0
super(DebugPrinter, self).__init__(**kwargs)
def dprint(self, message, indent_change=0):
"""
Handle debug message. Print to the stream specified by the 'file'
keyword argument at the current indentation level. Default stream is
stdout.
"""
if indent_change < 0:
self._current_indent += indent_change
print(("%s%s" % (" " * self._current_indent, message)),
file=self.file)
if indent_change > 0:
self._current_indent += indent_change
| (**kwargs) |
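A usage sketch for the DebugPrinter mixin above: a positive indent_change indents after the message, a negative one dedents before it, which yields nested trace output.

import sys

class Tracer(DebugPrinter):
    pass

t = Tracer(debug=True, file=sys.stdout)
t.dprint(">> enter rule", 1)
t.dprint("matched token")
t.dprint("<< leave rule", -1)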
46 | arpeggio | __init__ | null | def __init__(self, **kwargs):
self.debug = kwargs.pop("debug", False)
self.file = kwargs.pop("file", sys.stdout)
self._current_indent = 0
super(DebugPrinter, self).__init__(**kwargs)
| (self, **kwargs) |
47 | arpeggio | dprint |
Handle debug message. Print to the stream specified by the 'file'
keyword argument at the current indentation level. Default stream is
stdout.
| def dprint(self, message, indent_change=0):
"""
Handle debug message. Print to the stream specified by the 'file'
keyword argument at the current indentation level. Default stream is
stdout.
"""
if indent_change < 0:
self._current_indent += indent_change
print(("%s%s" % (" " * self._current_indent, message)),
file=self.file)
if indent_change > 0:
self._current_indent += indent_change
| (self, message, indent_change=0) |
48 | arpeggio | Decorator |
Decorators are a special kind of parsing expression used to mark
a containing pexpression and give it some special semantics.
For example, decorators are used to mark a pexpression as a lexical
rule (see :class:Lex).
| class Decorator(ParsingExpression):
"""
Decorators are a special kind of parsing expression used to mark
a containing pexpression and give it some special semantics.
For example, decorators are used to mark a pexpression as a lexical
rule (see :class:Lex).
"""
| (*elements, **kwargs) |
52 | arpeggio | EOF | null | def EOF():
return EndOfFile()
| () |
53 | arpeggio | Empty |
This predicate will always succeed without consuming input.
| class Empty(SyntaxPredicate):
"""
This predicate will always succeed without consuming input.
"""
def _parse(self, parser):
pass
| (*elements, **kwargs) |
56 | arpeggio | _parse | null | def _parse(self, parser):
pass
| (self, parser) |
58 | arpeggio | EndOfFile |
The Match class that will succeed in case end of input is reached.
| class EndOfFile(Match):
"""
The Match class that will succeed in case end of input is reached.
"""
def __init__(self):
super(EndOfFile, self).__init__("EOF")
@property
def name(self):
return "EOF"
def _parse(self, parser):
c_pos = parser.position
if len(parser.input) == c_pos:
return Terminal(EOF(), c_pos, '', suppress=True)
else:
if parser.debug:
parser.dprint("!! EOF not matched.")
parser._nm_raise(self, c_pos, parser)
| () |
59 | arpeggio | __init__ | null | def __init__(self):
super(EndOfFile, self).__init__("EOF")
| (self) |
61 | arpeggio | _parse | null | def _parse(self, parser):
c_pos = parser.position
if len(parser.input) == c_pos:
return Terminal(EOF(), c_pos, '', suppress=True)
else:
if parser.debug:
parser.dprint("!! EOF not matched.")
parser._nm_raise(self, c_pos, parser)
| (self, parser) |
62 | arpeggio | _parse_comments | Parse comments. | def _parse_comments(self, parser):
"""Parse comments."""
try:
parser.in_parse_comments = True
if parser.comments_model:
try:
while True:
# TODO: Consumed whitespaces and comments should be
# attached to the first match ahead.
parser.comments.append(
parser.comments_model.parse(parser))
if parser.skipws:
# Whitespace skipping
pos = parser.position
ws = parser.ws
i = parser.input
length = len(i)
while pos < length and i[pos] in ws:
pos += 1
parser.position = pos
except NoMatch:
# NoMatch in comment matching is perfectly
# legal and no action should be taken.
pass
finally:
parser.in_parse_comments = False
| (self, parser) |
63 | arpeggio | parse | null | def parse(self, parser):
if parser.skipws and not parser.in_lex_rule:
# Whitespace skipping
pos = parser.position
ws = parser.ws
i = parser.input
length = len(i)
while pos < length and i[pos] in ws:
pos += 1
parser.position = pos
if parser.debug:
parser.dprint(
"?? Try match rule {}{} at position {} => {}"
.format(self.name,
" in {}".format(parser.in_rule)
if parser.in_rule else "",
parser.position,
parser.context()))
if parser.skipws and parser.position in parser.comment_positions:
# Skip comments if already parsed.
parser.position = parser.comment_positions[parser.position]
else:
if not parser.in_parse_comments and not parser.in_lex_rule:
comment_start = parser.position
self._parse_comments(parser)
parser.comment_positions[comment_start] = parser.position
result = self._parse(parser)
if not self.suppress:
return result
| (self, parser) |
64 | arpeggio | GrammarError |
Error raised during the parser building phase, used to indicate an error in the
grammar definition.
| class GrammarError(ArpeggioError):
"""
Error raised during the parser building phase, used to indicate an error in the
grammar definition.
"""
| (message) |
67 | arpeggio | Kwd |
A specialization of StrMatch to specify keywords of the language.
| class Kwd(StrMatch):
"""
A specialization of StrMatch to specify keywords of the language.
"""
def __init__(self, to_match):
super(Kwd, self).__init__(to_match)
self.to_match = to_match
self.root = True
self.rule_name = 'keyword'
| (to_match) |
68 | arpeggio | __eq__ | null | def __eq__(self, other):
return self.to_match == text(other)
| (self, other) |
69 | arpeggio | __hash__ | null | def __hash__(self):
return hash(self.to_match)
| (self) |
70 | arpeggio | __init__ | null | def __init__(self, to_match):
super(Kwd, self).__init__(to_match)
self.to_match = to_match
self.root = True
self.rule_name = 'keyword'
| (self, to_match) |
71 | arpeggio | __str__ | null | def __str__(self):
return self.to_match
| (self) |
72 | arpeggio | __unicode__ | null | def __unicode__(self):
return self.__str__()
| (self) |
74 | arpeggio | _parse | null | def _parse(self, parser):
c_pos = parser.position
input_frag = parser.input[c_pos:c_pos+len(self.to_match)]
if self.ignore_case:
match = input_frag.lower() == self.to_match.lower()
else:
match = input_frag == self.to_match
if match:
if parser.debug:
parser.dprint(
"++ Match '{}' at {} => '{}'"
.format(self.to_match, c_pos,
parser.context(len(self.to_match))))
parser.position += len(self.to_match)
# If this match is inside a sequence then mark it for suppression
suppress = type(parser.last_pexpression) is Sequence
return Terminal(self, c_pos, self.to_match, suppress=suppress)
else:
if parser.debug:
parser.dprint(
"-- No match '{}' at {} => '{}'"
.format(self.to_match, c_pos,
parser.context(len(self.to_match))))
parser._nm_raise(self, c_pos, parser)
| (self, parser) |
77 | arpeggio | Match |
Base class for all classes that will try to match something from the input.
| class Match(ParsingExpression):
"""
Base class for all classes that will try to match something from the input.
"""
def __init__(self, rule_name, root=False, **kwargs):
super(Match, self).__init__(rule_name=rule_name, root=root, **kwargs)
@property
def name(self):
if self.root:
return "%s=%s(%s)" % (self.rule_name, self.__class__.__name__,
self.to_match)
else:
return "%s(%s)" % (self.__class__.__name__, self.to_match)
def _parse_comments(self, parser):
"""Parse comments."""
try:
parser.in_parse_comments = True
if parser.comments_model:
try:
while True:
# TODO: Consumed whitespaces and comments should be
# attached to the first match ahead.
parser.comments.append(
parser.comments_model.parse(parser))
if parser.skipws:
# Whitespace skipping
pos = parser.position
ws = parser.ws
i = parser.input
length = len(i)
while pos < length and i[pos] in ws:
pos += 1
parser.position = pos
except NoMatch:
# NoMatch in comment matching is perfectly
# legal and no action should be taken.
pass
finally:
parser.in_parse_comments = False
def parse(self, parser):
if parser.skipws and not parser.in_lex_rule:
# Whitespace skipping
pos = parser.position
ws = parser.ws
i = parser.input
length = len(i)
while pos < length and i[pos] in ws:
pos += 1
parser.position = pos
if parser.debug:
parser.dprint(
"?? Try match rule {}{} at position {} => {}"
.format(self.name,
" in {}".format(parser.in_rule)
if parser.in_rule else "",
parser.position,
parser.context()))
if parser.skipws and parser.position in parser.comment_positions:
# Skip comments if already parsed.
parser.position = parser.comment_positions[parser.position]
else:
if not parser.in_parse_comments and not parser.in_lex_rule:
comment_start = parser.position
self._parse_comments(parser)
parser.comment_positions[comment_start] = parser.position
result = self._parse(parser)
if not self.suppress:
return result
| (rule_name, root=False, **kwargs) |
78 | arpeggio | __init__ | null | def __init__(self, rule_name, root=False, **kwargs):
super(Match, self).__init__(rule_name=rule_name, root=root, **kwargs)
| (self, rule_name, root=False, **kwargs) |
82 | arpeggio | NoMatch |
Exception raised by the Match classes during parsing to indicate that the
match is not successful.
Args:
rules (list of ParsingExpression): Rules that are tried at the position
of the exception.
position (int): A position in the input stream where the exception
occurred.
parser (Parser): An instance of a parser.
| class NoMatch(Exception):
"""
Exception raised by the Match classes during parsing to indicate that the
match is not successful.
Args:
rules (list of ParsingExpression): Rules that are tried at the position
of the exception.
position (int): A position in the input stream where the exception
occurred.
parser (Parser): An instance of a parser.
"""
def __init__(self, rules, position, parser):
self.rules = rules
self.position = position
self.parser = parser
def eval_attrs(self):
"""
Call this to evaluate `message`, `context`, `line` and `col`. Called by __str__.
"""
def rule_to_exp_str(rule):
if hasattr(rule, '_exp_str'):
# Rule may override expected report string
return rule._exp_str
elif rule.root:
return rule.rule_name
elif isinstance(rule, Match) and \
not isinstance(rule, EndOfFile):
return "'{}'".format(rule.to_match.replace('\n', '\\n'))
else:
return rule.name
if not self.rules:
self.message = "Not expected input"
else:
what_is_expected = OrderedDict.fromkeys(
["{}".format(rule_to_exp_str(r)) for r in self.rules])
what_str = " or ".join(what_is_expected)
self.message = "Expected {}".format(what_str)
self.context = self.parser.context(position=self.position)
self.line, self.col = self.parser.pos_to_linecol(self.position)
def __str__(self):
self.eval_attrs()
return "{} at position {}{} => '{}'."\
.format(self.message,
"{}:".format(self.parser.file_name)
if self.parser.file_name else "",
(self.line, self.col),
self.context)
def __unicode__(self):
return self.__str__()
| (rules, position, parser) |
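A sketch of handling NoMatch at the call site: the exception carries the failure position, and eval_attrs() (triggered by str()) fills in message, line and col.

from arpeggio import ParserPython, NoMatch, EOF
from arpeggio import RegExMatch as _

def number(): return _(r'\d+')
def doc():    return number, EOF

try:
    ParserPython(doc).parse("abc")
except NoMatch as e:
    print(e)              # "Expected number at position ..." via eval_attrs()
    print(e.line, e.col)  # populated as a side effect of str(e) above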
83 | arpeggio | __init__ | null | def __init__(self, rules, position, parser):
self.rules = rules
self.position = position
self.parser = parser
| (self, rules, position, parser) |
84 | arpeggio | __str__ | null | def __str__(self):
self.eval_attrs()
return "{} at position {}{} => '{}'."\
.format(self.message,
"{}:".format(self.parser.file_name)
if self.parser.file_name else "",
(self.line, self.col),
self.context)
| (self) |
86 | arpeggio | eval_attrs |
Call this to evaluate `message`, `context`, `line` and `col`. Called by __str__.
| def eval_attrs(self):
"""
Call this to evaluate `message`, `context`, `line` and `col`. Called by __str__.
"""
def rule_to_exp_str(rule):
if hasattr(rule, '_exp_str'):
# Rule may override expected report string
return rule._exp_str
elif rule.root:
return rule.rule_name
elif isinstance(rule, Match) and \
not isinstance(rule, EndOfFile):
return "'{}'".format(rule.to_match.replace('\n', '\\n'))
else:
return rule.name
if not self.rules:
self.message = "Not expected input"
else:
what_is_expected = OrderedDict.fromkeys(
["{}".format(rule_to_exp_str(r)) for r in self.rules])
what_str = " or ".join(what_is_expected)
self.message = "Expected {}".format(what_str)
self.context = self.parser.context(position=self.position)
self.line, self.col = self.parser.pos_to_linecol(self.position)
| (self) |
87 | arpeggio | NonTerminal |
Non-leaf node of the Parse Tree. Represents language syntax construction.
At the same time used in ParseTreeNode navigation expressions.
See test_ptnode_navigation_expressions.py for examples of navigation
expressions.
Attributes:
nodes (list of ParseTreeNode): Children parse tree nodes.
_filtered (bool): Is this NT a dynamically created filtered NT.
This is used internally.
| class NonTerminal(ParseTreeNode, list):
"""
Non-leaf node of the Parse Tree. Represents language syntax construction.
At the same time used in ParseTreeNode navigation expressions.
See test_ptnode_navigation_expressions.py for examples of navigation
expressions.
Attributes:
nodes (list of ParseTreeNode): Children parse tree nodes.
_filtered (bool): Is this NT a dynamically created filtered NT.
This is used internally.
"""
__slots__ = ['rule', 'rule_name', 'position', 'error', 'comments',
'_filtered', '_expr_cache']
def __init__(self, rule, nodes, error=False, _filtered=False):
# Inherit position from the first child node
position = nodes[0].position if nodes else 0
super(NonTerminal, self).__init__(rule, position, error)
self.extend(flatten([nodes]))
self._filtered = _filtered
@property
def value(self):
"""Terminal protocol."""
return text(self)
@property
def desc(self):
return self.name
@property
def position_end(self):
return self[-1].position_end if self else self.position
def flat_str(self):
"""
Return a flattened string representation.
"""
return "".join([x.flat_str() for x in self])
def __str__(self):
return " | ".join([text(x) for x in self])
def __unicode__(self):
return self.__str__()
def __repr__(self):
return "[ %s ]" % ", ".join([repr(x) for x in self])
def tree_str(self, indent=0):
return '{}\n{}'.format(super(NonTerminal, self).tree_str(indent),
'\n'.join([c.tree_str(indent + 1)
for c in self]))
def __getattr__(self, rule_name):
"""
Find a child (non)terminal by the rule name.
Args:
rule_name(str): The name of the rule that is referenced from
this node rule.
"""
# Prevent infinite recursion
if rule_name in ['_expr_cache', '_filtered', 'rule', 'rule_name',
'position', 'append', 'extend']:
raise AttributeError
try:
# First check the cache
if rule_name in self._expr_cache:
return self._expr_cache[rule_name]
except AttributeError:
# Navigation expression cache. Used for lookup by rule name.
self._expr_cache = {}
# If result is not found in the cache collect all nodes
# with the given rule name and create new NonTerminal
# and cache it for later access.
nodes = []
rule = None
for n in self:
if self._filtered:
# For filtered NT rule_name is a rule on
# each of its children
for m in n:
if m.rule_name == rule_name:
nodes.append(m)
rule = m.rule
else:
if n.rule_name == rule_name:
nodes.append(n)
rule = n.rule
if rule is None:
# If rule is not found resort to default behavior
return self.__getattribute__(rule_name)
result = NonTerminal(rule=rule, nodes=nodes, _filtered=True)
self._expr_cache[rule_name] = result
return result
| (rule, nodes, error=False, _filtered=False) |
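A sketch of the navigation expressions the NonTerminal docstring mentions: __getattr__ collects children by rule name, so subtrees can be queried attribute-style.

from arpeggio import ParserPython, OneOrMore, EOF
from arpeggio import RegExMatch as _

def key():  return _(r'\w+')
def pair(): return key, "=", _(r'\d+')
def doc():  return OneOrMore(pair), EOF

tree = ParserPython(doc).parse("a=1 b=2")
for p in tree.pair:           # filtered NonTerminal of every `pair` child
    print(p.key.flat_str())   # nested navigation by rule name -> "a", "b"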
88 | arpeggio | __getattr__ |
Find a child (non)terminal by the rule name.
Args:
rule_name(str): The name of the rule that is referenced from
this node rule.
| def __getattr__(self, rule_name):
"""
Find a child (non)terminal by the rule name.
Args:
rule_name(str): The name of the rule that is referenced from
this node rule.
"""
# Prevent infinite recursion
if rule_name in ['_expr_cache', '_filtered', 'rule', 'rule_name',
'position', 'append', 'extend']:
raise AttributeError
try:
# First check the cache
if rule_name in self._expr_cache:
return self._expr_cache[rule_name]
except AttributeError:
# Navigation expression cache. Used for lookup by rule name.
self._expr_cache = {}
# If result is not found in the cache collect all nodes
# with the given rule name and create new NonTerminal
# and cache it for later access.
nodes = []
rule = None
for n in self:
if self._filtered:
# For filtered NT rule_name is a rule on
# each of its children
for m in n:
if m.rule_name == rule_name:
nodes.append(m)
rule = m.rule
else:
if n.rule_name == rule_name:
nodes.append(n)
rule = n.rule
if rule is None:
# If rule is not found resort to default behavior
return self.__getattribute__(rule_name)
result = NonTerminal(rule=rule, nodes=nodes, _filtered=True)
self._expr_cache[rule_name] = result
return result
| (self, rule_name) |
89 | arpeggio | __init__ | null | def __init__(self, rule, nodes, error=False, _filtered=False):
# Inherit position from the first child node
position = nodes[0].position if nodes else 0
super(NonTerminal, self).__init__(rule, position, error)
self.extend(flatten([nodes]))
self._filtered = _filtered
| (self, rule, nodes, error=False, _filtered=False) |
90 | arpeggio | __repr__ | null | def __repr__(self):
return "[ %s ]" % ", ".join([repr(x) for x in self])
| (self) |
91 | arpeggio | __str__ | null | def __str__(self):
return " | ".join([text(x) for x in self])
| (self) |
93 | arpeggio | flat_str |
Return a flattened string representation.
| def flat_str(self):
"""
Return a flattened string representation.
"""
return "".join([x.flat_str() for x in self])
| (self) |
94 | arpeggio | tree_str | null | def tree_str(self, indent=0):
return '{}\n{}'.format(super(NonTerminal, self).tree_str(indent),
'\n'.join([c.tree_str(indent + 1)
for c in self]))
| (self, indent=0) |
95 | arpeggio | visit |
Visitor pattern implementation.
Args:
visitor(PTNodeVisitor): The visitor object.
| def visit(self, visitor):
"""
Visitor pattern implementation.
Args:
visitor(PTNodeVisitor): The visitor object.
"""
if visitor.debug:
visitor.dprint("Visiting {} type:{} str:{}"
.format(self.name, type(self).__name__, text(self)))
children = SemanticActionResults()
if isinstance(self, NonTerminal):
for node in self:
child = node.visit(visitor)
# If visit returns None suppress that child node
if child is not None:
children.append_result(node.rule_name, child)
visit_name = "visit_%s" % self.rule_name
if hasattr(visitor, visit_name):
# Call visit method.
result = getattr(visitor, visit_name)(self, children)
# If there is a method with 'second' prefix save
# the result of visit for post-processing
if hasattr(visitor, "second_%s" % self.rule_name):
visitor.for_second_pass.append((self.rule_name, result))
return result
elif visitor.defaults:
# If default actions are enabled
return visitor.visit__default__(self, children)
| (self, visitor) |
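The visit() protocol above pairs with arpeggio's PTNodeVisitor: visit_<rule_name>(node, children) results replace nodes bottom-up. A small semantic-analysis sketch:

from arpeggio import ParserPython, PTNodeVisitor, visit_parse_tree, OneOrMore, EOF
from arpeggio import RegExMatch as _

def number(): return _(r'\d+')
def doc():    return OneOrMore(number), EOF

class SumVisitor(PTNodeVisitor):
    def visit_number(self, node, children):
        return int(node.value)   # Terminal.value is the matched text
    def visit_doc(self, node, children):
        return sum(children)     # children holds the converted numbers

tree = ParserPython(doc).parse("1 2 3")
print(visit_parse_tree(tree, SumVisitor()))  # 6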
96 | arpeggio | Not |
This predicate will succeed if the specified expression doesn't match
current input.
| class Not(SyntaxPredicate):
"""
This predicate will succeed if the specified expression doesn't match
current input.
"""
def _parse(self, parser):
c_pos = parser.position
old_in_not = parser.in_not
parser.in_not = True
try:
for e in self.nodes:
try:
e.parse(parser)
except NoMatch:
parser.position = c_pos
return
parser.position = c_pos
parser._nm_raise(self, c_pos, parser)
finally:
parser.in_not = old_in_not
| (*elements, **kwargs) |
99 | arpeggio | _parse | null | def _parse(self, parser):
c_pos = parser.position
old_in_not = parser.in_not
parser.in_not = True
try:
for e in self.nodes:
try:
e.parse(parser)
except NoMatch:
parser.position = c_pos
return
parser.position = c_pos
parser._nm_raise(self, c_pos, parser)
finally:
parser.in_not = old_in_not
| (self, parser) |
101 | arpeggio | OneOrMore |
OneOrMore will try to match parser expression specified one or more times.
| class OneOrMore(Repetition):
"""
OneOrMore will try to match parser expression specified one or more times.
"""
def _parse(self, parser):
results = []
first = True
if self.eolterm:
# Remember current eolterm and set eolterm of
# this repetition
old_eolterm = parser.eolterm
parser.eolterm = self.eolterm
# Prefetching
append = results.append
p = self.nodes[0].parse
sep = self.sep.parse if self.sep else None
result = None
try:
while True:
try:
c_pos = parser.position
if sep and result:
sep_result = sep(parser)
if sep_result:
append(sep_result)
result = p(parser)
if not result:
break
append(result)
first = False
except NoMatch:
parser.position = c_pos # Backtracking
if first:
raise
break
finally:
if self.eolterm:
# Restore previous eolterm
parser.eolterm = old_eolterm
return results
| (*elements, **kwargs) |
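A sketch of the sep support visible in Repetition.__init__ below: OneOrMore accepts a separator expression, so a comma-separated list needs no helper rule.

from arpeggio import ParserPython, OneOrMore, EOF
from arpeggio import RegExMatch as _

def number(): return _(r'\d+')
def csv():    return OneOrMore(number, sep=","), EOF

tree = ParserPython(csv).parse("1, 2, 3")
print(tree)  # the numbers and the matched separators, in input order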
102 | arpeggio | __init__ | null | def __init__(self, *elements, **kwargs):
super(Repetition, self).__init__(*elements, **kwargs)
self.eolterm = kwargs.get('eolterm', False)
self.sep = kwargs.get('sep', None)
| (self, *elements, **kwargs) |
104 | arpeggio | _parse | null | def _parse(self, parser):
results = []
first = True
if self.eolterm:
# Remember current eolterm and set eolterm of
# this repetition
old_eolterm = parser.eolterm
parser.eolterm = self.eolterm
# Prefetching
append = results.append
p = self.nodes[0].parse
sep = self.sep.parse if self.sep else None
result = None
try:
while True:
try:
c_pos = parser.position
if sep and result:
sep_result = sep(parser)
if sep_result:
append(sep_result)
result = p(parser)
if not result:
break
append(result)
first = False
except NoMatch:
parser.position = c_pos # Backtracking
if first:
raise
break
finally:
if self.eolterm:
# Restore previous eolterm
parser.eolterm = old_eolterm
return results
| (self, parser) |
106 | arpeggio | Optional |
Optional will try to match parser expression specified and will not fail
in case match is not successful.
| class Optional(Repetition):
"""
Optional will try to match parser expression specified and will not fail
in case match is not successful.
"""
def _parse(self, parser):
result = None
c_pos = parser.position
try:
result = [self.nodes[0].parse(parser)]
except NoMatch:
parser.position = c_pos # Backtracking
return result
| (*elements, **kwargs) |
109 | arpeggio | _parse | null | def _parse(self, parser):
result = None
c_pos = parser.position
try:
result = [self.nodes[0].parse(parser)]
except NoMatch:
parser.position = c_pos # Backtracking
return result
| (self, parser) |
111 | arpeggio | OrderedChoice |
Will match one of the parser expressions specified. Parser will try to
match expressions in the order they are defined.
| class OrderedChoice(Sequence):
"""
Will match one of the parser expressions specified. Parser will try to
match expressions in the order they are defined.
"""
def _parse(self, parser):
result = None
match = False
c_pos = parser.position
if self.ws is not None:
old_ws = parser.ws
parser.ws = self.ws
if self.skipws is not None:
old_skipws = parser.skipws
parser.skipws = self.skipws
try:
for e in self.nodes:
try:
result = e.parse(parser)
if result is not None:
match = True
result = [result]
break
except NoMatch:
parser.position = c_pos # Backtracking
finally:
if self.ws is not None:
parser.ws = old_ws
if self.skipws is not None:
parser.skipws = old_skipws
if not match:
parser._nm_raise(self, c_pos, parser)
return result
| (*elements, **kwargs) |
112 | arpeggio | __init__ | null | def __init__(self, *elements, **kwargs):
super(Sequence, self).__init__(*elements, **kwargs)
self.ws = kwargs.pop('ws', None)
self.skipws = kwargs.pop('skipws', None)
| (self, *elements, **kwargs) |
114 | arpeggio | _parse | null | def _parse(self, parser):
result = None
match = False
c_pos = parser.position
if self.ws is not None:
old_ws = parser.ws
parser.ws = self.ws
if self.skipws is not None:
old_skipws = parser.skipws
parser.skipws = self.skipws
try:
for e in self.nodes:
try:
result = e.parse(parser)
if result is not None:
match = True
result = [result]
break
except NoMatch:
parser.position = c_pos # Backtracking
finally:
if self.ws is not None:
parser.ws = old_ws
if self.skipws is not None:
parser.skipws = old_skipws
if not match:
parser._nm_raise(self, c_pos, parser)
return result
| (self, parser) |
116 | collections | OrderedDict | Dictionary that remembers insertion order | class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as regular dictionaries.
# The internal self.__map dict maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# The sentinel is in self.__hardroot with a weakref proxy in self.__root.
# The prev links are weakref proxies (to prevent circular references).
# Individual links are kept alive by the hard reference in self.__map.
# Those hard references disappear when a key is deleted from an OrderedDict.
def __init__(self, other=(), /, **kwds):
'''Initialize an ordered dictionary. The signature is the same as
regular dictionaries. Keyword argument order is preserved.
'''
try:
self.__root
except AttributeError:
self.__hardroot = _Link()
self.__root = root = _proxy(self.__hardroot)
root.prev = root.next = root
self.__map = {}
self.__update(other, **kwds)
def __setitem__(self, key, value,
dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link at the end of the linked list,
# and the inherited dictionary is updated with the new key/value pair.
if key not in self:
self.__map[key] = link = Link()
root = self.__root
last = root.prev
link.prev, link.next, link.key = last, root, key
last.next = link
root.prev = proxy(link)
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which gets
# removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link = self.__map.pop(key)
link_prev = link.prev
link_next = link.next
link_prev.next = link_next
link_next.prev = link_prev
link.prev = None
link.next = None
def __iter__(self):
'od.__iter__() <==> iter(od)'
# Traverse the linked list in order.
root = self.__root
curr = root.next
while curr is not root:
yield curr.key
curr = curr.next
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
# Traverse the linked list in reverse order.
root = self.__root
curr = root.prev
while curr is not root:
yield curr.key
curr = curr.prev
def clear(self):
'od.clear() -> None. Remove all items from od.'
root = self.__root
root.prev = root.next = root
self.__map.clear()
dict.clear(self)
def popitem(self, last=True):
'''Remove and return a (key, value) pair from the dictionary.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root.prev
link_prev = link.prev
link_prev.next = root
root.prev = link_prev
else:
link = root.next
link_next = link.next
root.next = link_next
link_next.prev = root
key = link.key
del self.__map[key]
value = dict.pop(self, key)
return key, value
def move_to_end(self, key, last=True):
'''Move an existing element to the end (or beginning if last is false).
Raise KeyError if the element does not exist.
'''
link = self.__map[key]
link_prev = link.prev
link_next = link.next
soft_link = link_next.prev
link_prev.next = link_next
link_next.prev = link_prev
root = self.__root
if last:
last = root.prev
link.prev = last
link.next = root
root.prev = soft_link
last.next = link
else:
first = root.next
link.prev = root
link.next = first
first.prev = soft_link
root.next = link
def __sizeof__(self):
sizeof = _sys.getsizeof
n = len(self) + 1 # number of links including root
size = sizeof(self.__dict__) # instance dictionary
size += sizeof(self.__map) * 2 # internal dict and inherited dict
size += sizeof(self.__hardroot) * n # link objects
size += sizeof(self.__root) * n # proxy objects
return size
update = __update = _collections_abc.MutableMapping.update
def keys(self):
"D.keys() -> a set-like object providing a view on D's keys"
return _OrderedDictKeysView(self)
def items(self):
"D.items() -> a set-like object providing a view on D's items"
return _OrderedDictItemsView(self)
def values(self):
"D.values() -> an object providing a view on D's values"
return _OrderedDictValuesView(self)
__ne__ = _collections_abc.MutableMapping.__ne__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding
value. If key is not found, d is returned if given, otherwise KeyError
is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'''Insert key with a value of default if key is not in the dictionary.
Return the value for key if key is in the dictionary, else default.
'''
if key in self:
return self[key]
self[key] = default
return default
@_recursive_repr()
def __repr__(self):
'od.__repr__() <==> repr(od)'
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self.items()))
def __reduce__(self):
'Return state information for pickling'
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
return self.__class__, (), inst_dict or None, None, iter(self.items())
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''Create a new ordered dictionary with keys from iterable and values set to value.
'''
self = cls()
for key in iterable:
self[key] = value
return self
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return dict.__eq__(self, other) and all(map(_eq, self, other))
return dict.__eq__(self, other)
def __ior__(self, other):
self.update(other)
return self
def __or__(self, other):
if not isinstance(other, dict):
return NotImplemented
new = self.__class__(self)
new.update(other)
return new
def __ror__(self, other):
if not isinstance(other, dict):
return NotImplemented
new = self.__class__(other)
new.update(self)
return new
| null |
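A short usage sketch of the order-aware methods above (standard library behavior):

from collections import OrderedDict

od = OrderedDict.fromkeys("abc")     # keys a, b, c -> None
od.move_to_end("a")                  # order: b, c, a
od.move_to_end("c", last=False)      # order: c, b, a
print(list(od))                      # ['c', 'b', 'a']
print(od.popitem(last=False))        # ('c', None) -- FIFO pop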
117 | arpeggio | PTNodeVisitor |
Base class for all parse tree visitors.
| class PTNodeVisitor(DebugPrinter):
"""
Base class for all parse tree visitors.
"""
def __init__(self, defaults=True, **kwargs):
"""
Args:
defaults(bool): If the default visit method should be applied in
case no method is defined.
"""
self.for_second_pass = []
self.defaults = defaults
super(PTNodeVisitor, self).__init__(**kwargs)
def visit__default__(self, node, children):
"""
Called if no visit method is defined for the node.
Args:
node(ParseTreeNode):
children(processed children ParseTreeNode-s):
"""
if isinstance(node, Terminal):
# Default for Terminal is to convert to string unless suppress flag
# is set in which case it is suppressed by setting to None.
retval = text(node) if not node.suppress else None
else:
retval = node
# Special case. If only one child exists, return it.
if len(children) == 1:
retval = children[0]
else:
# If there is only one non-string child return
# that by default. This will support e.g. bracket
# removals.
last_non_str = None
for c in children:
if not isstr(c):
if last_non_str is None:
last_non_str = c
else:
# If there are multiple non-string objects
# by default convert non-terminal to string
if self.debug:
self.dprint("*** Warning: Multiple "
"non-string objects found in "
"default visit. Converting non-"
"terminal to a string.")
retval = text(node)
break
else:
# Return the only non-string child
retval = last_non_str
return retval
| (defaults=True, **kwargs) |
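A minimal visitor sketch, assuming arpeggio's visit_parse_tree helper; the grammar and rule names are illustrative:

from arpeggio import ParserPython, PTNodeVisitor, visit_parse_tree
from arpeggio import RegExMatch as _

def number():   return _(r'\d+')
def addition(): return number, "+", number

class AddVisitor(PTNodeVisitor):
    def visit_number(self, node, children):
        return int(str(node))
    def visit_addition(self, node, children):
        # Filter defensively in case the "+" terminal is kept in children
        nums = [c for c in children if isinstance(c, int)]
        return nums[0] + nums[1]

parser = ParserPython(addition)
result = visit_parse_tree(parser.parse("3 + 4"), AddVisitor())  # 7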
118 | arpeggio | __init__ |
Args:
defaults(bool): If the default visit method should be applied in
case no method is defined.
| def __init__(self, defaults=True, **kwargs):
"""
Args:
defaults(bool): If the default visit method should be applied in
case no method is defined.
"""
self.for_second_pass = []
self.defaults = defaults
super(PTNodeVisitor, self).__init__(**kwargs)
| (self, defaults=True, **kwargs) |
120 | arpeggio | visit__default__ |
Called if no visit method is defined for the node.
Args:
node(ParseTreeNode):
children(processed children ParseTreeNode-s):
| def visit__default__(self, node, children):
"""
Called if no visit method is defined for the node.
Args:
node(ParseTreeNode):
children(processed children ParseTreeNode-s):
"""
if isinstance(node, Terminal):
# Default for Terminal is to convert to string unless suppress flag
# is set in which case it is suppressed by setting to None.
retval = text(node) if not node.suppress else None
else:
retval = node
# Special case. If only one child exists, return it.
if len(children) == 1:
retval = children[0]
else:
# If there is only one non-string child return
# that by default. This will support e.g. bracket
# removals.
last_non_str = None
for c in children:
if not isstr(c):
if last_non_str is None:
last_non_str = c
else:
# If there are multiple non-string objects
# by default convert non-terminal to string
if self.debug:
self.dprint("*** Warning: Multiple "
"non-string objects found in "
"default visit. Converting non-"
"terminal to a string.")
retval = text(node)
break
else:
# Return the only non-string child
retval = last_non_str
return retval
| (self, node, children) |
121 | arpeggio | ParseTreeNode |
Abstract base class representing node of the Parse Tree.
The node can be terminal(the leaf of the parse tree) or non-terminal.
Attributes:
rule (ParsingExpression): The rule that created this node.
rule_name (str): The name of the rule that created this node if
root rule or empty string otherwise.
position (int): A position in the input stream where the match
occurred.
position_end (int, read-only): A position in the input stream where
the node ends.
This position is one char behind the last char contained in this
node. Thus, position_end - position = length of the node.
error (bool): Is this a false parse tree node created during error
recovery.
comments : A parse tree of comment(s) attached to this node.
| class ParseTreeNode(object):
"""
Abstract base class representing node of the Parse Tree.
The node can be terminal(the leaf of the parse tree) or non-terminal.
Attributes:
rule (ParsingExpression): The rule that created this node.
rule_name (str): The name of the rule that created this node if
root rule or empty string otherwise.
position (int): A position in the input stream where the match
occurred.
position_end (int, read-only): A position in the input stream where
the node ends.
This position is one char behind the last char contained in this
node. Thus, position_end - position = length of the node.
error (bool): Is this a false parse tree node created during error
recovery.
comments : A parse tree of comment(s) attached to this node.
"""
def __init__(self, rule, position, error):
assert rule
assert rule.rule_name is not None
self.rule = rule
self.rule_name = rule.rule_name
self.position = position
self.error = error
self.comments = None
@property
def name(self):
return "%s [%s]" % (self.rule_name, self.position)
@property
def position_end(self):
"Must be implemented in subclasses."
raise NotImplementedError
def visit(self, visitor):
"""
Visitor pattern implementation.
Args:
visitor(PTNodeVisitor): The visitor object.
"""
if visitor.debug:
visitor.dprint("Visiting {} type:{} str:{}"
.format(self.name, type(self).__name__, text(self)))
children = SemanticActionResults()
if isinstance(self, NonTerminal):
for node in self:
child = node.visit(visitor)
# If visit returns None suppress that child node
if child is not None:
children.append_result(node.rule_name, child)
visit_name = "visit_%s" % self.rule_name
if hasattr(visitor, visit_name):
# Call visit method.
result = getattr(visitor, visit_name)(self, children)
# If there is a method with 'second' prefix save
# the result of visit for post-processing
if hasattr(visitor, "second_%s" % self.rule_name):
visitor.for_second_pass.append((self.rule_name, result))
return result
elif visitor.defaults:
# If default actions are enabled
return visitor.visit__default__(self, children)
def tree_str(self, indent=0):
return '{}{} [{}-{}]'.format(' ' * indent, self.rule.name,
self.position, self.position_end)
| (rule, position, error) |
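A sketch of inspecting the node attributes documented above on a tiny grammar (names illustrative):

from arpeggio import ParserPython
from arpeggio import RegExMatch as _

def number():   return _(r'\d+')
def addition(): return number, "+", number

parser = ParserPython(addition)
tree = parser.parse("3 + 4")
print(tree.rule_name, tree.position)      # addition 0
for node in tree:                         # NonTerminal iterates its children
    print(node.rule_name, node.position)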
122 | arpeggio | __init__ | null | def __init__(self, rule, position, error):
assert rule
assert rule.rule_name is not None
self.rule = rule
self.rule_name = rule.rule_name
self.position = position
self.error = error
self.comments = None
| (self, rule, position, error) |
123 | arpeggio | tree_str | null | def tree_str(self, indent=0):
return '{}{} [{}-{}]'.format(' ' * indent, self.rule.name,
self.position, self.position_end)
| (self, indent=0) |
125 | arpeggio | Parser |
Abstract base class for all parsers.
Attributes:
comments_model: parser model for comments.
comments(list): A list of ParseTreeNode for matched comments.
sem_actions(dict): A dictionary of semantic actions keyed by the
rule name.
parse_tree(NonTerminal): The parse tree consisting of NonTerminal and
Terminal instances.
in_rule (str): Current rule name.
in_parse_comments (bool): True if parsing comments.
in_lex_rule (bool): True if in lexical rule. Currently used in Combine
decorator to convert match to a single Terminal.
in_not (bool): True if in Not parsing expression. Used for better error
reporting.
last_pexpression (ParsingExpression): Last parsing expression
traversed.
| class Parser(DebugPrinter):
"""
Abstract base class for all parsers.
Attributes:
comments_model: parser model for comments.
comments(list): A list of ParseTreeNode for matched comments.
sem_actions(dict): A dictionary of semantic actions keyed by the
rule name.
parse_tree(NonTerminal): The parse tree consisting of NonTerminal and
Terminal instances.
in_rule (str): Current rule name.
in_parse_comments (bool): True if parsing comments.
in_lex_rule (bool): True if in lexical rule. Currently used in Combine
decorator to convert match to a single Terminal.
in_not (bool): True if in Not parsing expression. Used for better error
reporting.
last_pexpression (ParsingExpression): Last parsing expression
traversed.
"""
# Not marker for NoMatch rules list. Used if the first unsuccessful rule
# match is Not.
FIRST_NOT = Not()
def __init__(self, skipws=True, ws=None, reduce_tree=False, autokwd=False,
ignore_case=False, memoization=False, **kwargs):
"""
Args:
skipws (bool): Should the whitespace skipping be done. Default is
True.
ws (str): A string consisting of whitespace characters.
reduce_tree (bool): If true non-terminals with single child will be
eliminated from the parse tree. Default is False.
autokwd(bool): If keyword-like StrMatches are matched on word
boundaries. Default is False.
ignore_case(bool): If case is ignored (default=False)
memoization(bool): If memoization should be used
(a.k.a. packrat parsing)
"""
super(Parser, self).__init__(**kwargs)
# Used to indicate state in which parser should not
# treat newlines as whitespaces.
self._eolterm = False
self.skipws = skipws
if ws is not None:
self.ws = ws
else:
self.ws = DEFAULT_WS
self.reduce_tree = reduce_tree
self.autokwd = autokwd
self.ignore_case = ignore_case
self.memoization = memoization
self.comments_model = None
self.comments = []
self.comment_positions = {}
self.sem_actions = {}
self.parse_tree = None
# Create regex used for autokwd matching
flags = 0
if ignore_case:
flags = re.IGNORECASE
self.keyword_regex = re.compile(r'[^\d\W]\w*', flags)
# Keep track of root rule we are currently in.
# Used for debugging purposes
self.in_rule = ''
self.in_parse_comments = False
# Are we in lexical rule? If so do not
# skip whitespaces.
self.in_lex_rule = False
# Are we in Not parsing expression?
self.in_not = False
# Last parsing expression traversed
self.last_pexpression = None
@property
def ws(self):
return self._ws
@ws.setter
def ws(self, new_value):
self._real_ws = new_value
self._ws = new_value
if self.eolterm:
self._ws = self._ws.replace('\n', '').replace('\r', '')
@property
def eolterm(self):
return self._eolterm
@eolterm.setter
def eolterm(self, new_value):
# Toggle newline char in ws on eolterm property set.
# During eolterm state parser should not treat
# newline as a whitespace.
self._eolterm = new_value
if self._eolterm:
self._ws = self._ws.replace('\n', '').replace('\r', '')
else:
self._ws = self._real_ws
def parse(self, _input, file_name=None):
"""
Parses input and produces parse tree.
Args:
_input(str): An input string to parse.
file_name(str): If input is loaded from file this can be
set to file name. It is used in error messages.
"""
self.position = 0 # Input position
self.nm = None # Last NoMatch exception
self.line_ends = []
self.input = _input
self.file_name = file_name
self.comment_positions = {}
self.cache_hits = 0
self.cache_misses = 0
try:
self.parse_tree = self._parse()
except NoMatch as e:
# Remove Not marker
if e.rules[0] is Parser.FIRST_NOT:
del e.rules[0]
# Get line and column from position
e.line, e.col = self.pos_to_linecol(e.position)
raise
finally:
# At end of parsing clear all memoization caches.
# Do this here to free memory.
if self.memoization:
self._clear_caches()
# In debug mode export parse tree to dot file for
# visualization
if self.debug and self.parse_tree:
from arpeggio.export import PTDOTExporter
root_rule_name = self.parse_tree.rule_name
PTDOTExporter().exportFile(
self.parse_tree, "{}_parse_tree.dot".format(root_rule_name))
return self.parse_tree
def parse_file(self, file_name):
"""
Parses content from the given file.
Args:
file_name(str): A file name.
"""
with codecs.open(file_name, 'r', 'utf-8') as f:
content = f.read()
return self.parse(content, file_name=file_name)
def getASG(self, sem_actions=None, defaults=True):
"""
Creates Abstract Semantic Graph (ASG) from the parse tree.
Args:
sem_actions (dict): The semantic actions dictionary to use for
semantic analysis. Rule names are the keys and semantic action
objects are values.
defaults (bool): If True a default semantic action will be
applied in case no action is defined for the node.
"""
if not self.parse_tree:
raise Exception(
"Parse tree is empty. You did call parse(), didn't you?")
if sem_actions is None:
if not self.sem_actions:
raise Exception("Semantic actions not defined.")
else:
sem_actions = self.sem_actions
if type(sem_actions) is not dict:
raise Exception("Semantic actions parameter must be a dictionary.")
for_second_pass = []
def tree_walk(node):
"""
Walks the parse tree calling first_pass for every registered
semantic action and building a list of objects that need to be
processed in the second pass.
"""
if self.debug:
self.dprint(
"Walking down %s type: %s str: %s" %
(node.name, type(node).__name__, text(node)))
children = SemanticActionResults()
if isinstance(node, NonTerminal):
for n in node:
child = tree_walk(n)
if child is not None:
children.append_result(n.rule_name, child)
if self.debug:
self.dprint("Processing %s = '%s' type:%s len:%d" %
(node.name, text(node), type(node).__name__,
len(node) if isinstance(node, list) else 0))
for i, a in enumerate(children):
self.dprint(" %d:%s type:%s" %
(i+1, text(a), type(a).__name__))
if node.rule_name in sem_actions:
sem_action = sem_actions[node.rule_name]
if isinstance(sem_action, types.FunctionType):
retval = sem_action(self, node, children)
else:
retval = sem_action.first_pass(self, node, children)
if hasattr(sem_action, "second_pass"):
for_second_pass.append((node.rule_name, retval))
if self.debug:
action_name = sem_action.__name__ \
if hasattr(sem_action, '__name__') \
else sem_action.__class__.__name__
self.dprint(" Applying semantic action %s" % action_name)
else:
if defaults:
# If no action is registered for this rule use a sane default
if self.debug:
self.dprint(" Applying default semantic action.")
retval = SemanticAction().first_pass(self, node, children)
else:
retval = node
if self.debug:
if retval is None:
self.dprint(" Suppressed.")
else:
self.dprint(" Resolved to = %s type:%s" %
(text(retval), type(retval).__name__))
return retval
if self.debug:
self.dprint("ASG: First pass")
asg = tree_walk(self.parse_tree)
# Second pass
if self.debug:
self.dprint("ASG: Second pass")
for sa_name, asg_node in for_second_pass:
sem_actions[sa_name].second_pass(self, asg_node)
return asg
def pos_to_linecol(self, pos):
"""
Calculate (line, column) tuple for the given position in the stream.
"""
if not self.line_ends:
try:
# TODO: Check this implementation on Windows.
self.line_ends.append(self.input.index("\n"))
while True:
try:
self.line_ends.append(
self.input.index("\n", self.line_ends[-1] + 1))
except ValueError:
break
except ValueError:
pass
line = bisect.bisect_left(self.line_ends, pos)
col = pos
if line > 0:
col -= self.line_ends[line - 1]
if self.input[self.line_ends[line - 1]] in '\n\r':
col -= 1
return line + 1, col + 1
def context(self, length=None, position=None):
"""
Returns current context substring, i.e. the substring around current
position.
Args:
length(int): If given, that many chars from the current position
are marked with asterisks.
position(int): The position in the input stream.
"""
if not position:
position = self.position
if length:
retval = "{}*{}*{}".format(
text(self.input[max(position - 10, 0):position]),
text(self.input[position:position + length]),
text(self.input[position + length:position + 10]))
else:
retval = "{}*{}".format(
text(self.input[max(position - 10, 0):position]),
text(self.input[position:position + 10]))
return retval.replace('\n', ' ').replace('\r', '')
def _nm_raise(self, *args):
"""
Register a new NoMatch object if the input is consumed
beyond the last NoMatch and raise the last NoMatch.
Args:
args: A (rule, position, parser) tuple.
"""
rule, position, parser = args
if self.nm is None or not parser.in_parse_comments:
if self.nm is None or position > self.nm.position:
if self.in_not:
self.nm = NoMatch([Parser.FIRST_NOT], position, parser)
else:
self.nm = NoMatch([rule], position, parser)
elif position == self.nm.position and isinstance(rule, Match) \
and not self.in_not:
self.nm.rules.append(rule)
raise self.nm
def _clear_caches(self):
"""
Clear memoization caches if packrat parser is used.
"""
self.parser_model._clear_cache()
if self.comments_model:
self.comments_model._clear_cache()
| (skipws=True, ws=None, reduce_tree=False, autokwd=False, ignore_case=False, memoization=False, **kwargs) |
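A sketch of the error-reporting path: parse() converts the NoMatch position to line/column before re-raising (grammar names illustrative):

from arpeggio import ParserPython, NoMatch
from arpeggio import RegExMatch as _

def number():   return _(r'\d+')
def addition(): return number, "+", number

parser = ParserPython(addition)
try:
    parser.parse("3 +")                   # missing right operand
except NoMatch as e:
    print("Syntax error at line %d, col %d" % (e.line, e.col))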
126 | arpeggio | __init__ |
Args:
skipws (bool): Should the whitespace skipping be done. Default is
True.
ws (str): A string consisting of whitespace characters.
reduce_tree (bool): If true non-terminals with single child will be
eliminated from the parse tree. Default is False.
autokwd(bool): If keyword-like StrMatches are matched on word
boundaries. Default is False.
ignore_case(bool): If case is ignored (default=False)
memoization(bool): If memoization should be used
(a.k.a. packrat parsing)
| def __init__(self, skipws=True, ws=None, reduce_tree=False, autokwd=False,
ignore_case=False, memoization=False, **kwargs):
"""
Args:
skipws (bool): Should the whitespace skipping be done. Default is
True.
ws (str): A string consisting of whitespace characters.
reduce_tree (bool): If true non-terminals with single child will be
eliminated from the parse tree. Default is False.
autokwd(bool): If keyword-like StrMatches are matched on word
boundaries. Default is False.
ignore_case(bool): If case is ignored (default=False)
memoization(bool): If memoization should be used
(a.k.a. packrat parsing)
"""
super(Parser, self).__init__(**kwargs)
# Used to indicate state in which parser should not
# treat newlines as whitespaces.
self._eolterm = False
self.skipws = skipws
if ws is not None:
self.ws = ws
else:
self.ws = DEFAULT_WS
self.reduce_tree = reduce_tree
self.autokwd = autokwd
self.ignore_case = ignore_case
self.memoization = memoization
self.comments_model = None
self.comments = []
self.comment_positions = {}
self.sem_actions = {}
self.parse_tree = None
# Create regex used for autokwd matching
flags = 0
if ignore_case:
flags = re.IGNORECASE
self.keyword_regex = re.compile(r'[^\d\W]\w*', flags)
# Keep track of root rule we are currently in.
# Used for debugging purposes
self.in_rule = ''
self.in_parse_comments = False
# Are we in lexical rule? If so do not
# skip whitespaces.
self.in_lex_rule = False
# Are we in Not parsing expression?
self.in_not = False
# Last parsing expression traversed
self.last_pexpression = None
| (self, skipws=True, ws=None, reduce_tree=False, autokwd=False, ignore_case=False, memoization=False, **kwargs) |
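A sketch of the commonly tuned constructor flags, forwarded through ParserPython; the trivial root rule is for illustration only:

from arpeggio import ParserPython

def root(): return "hello", "world"

parser = ParserPython(root, skipws=True, ws=" \t\r\n",
                      ignore_case=True, memoization=True)
parser.parse("HELLO World")   # succeeds because ignore_case=True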
127 | arpeggio | _clear_caches |
Clear memoization caches if packrat parser is used.
| def _clear_caches(self):
"""
Clear memoization caches if packrat parser is used.
"""
self.parser_model._clear_cache()
if self.comments_model:
self.comments_model._clear_cache()
| (self) |
128 | arpeggio | _nm_raise |
Register a new NoMatch object if the input is consumed
beyond the last NoMatch and raise the last NoMatch.
Args:
args: A (rule, position, parser) tuple.
| def _nm_raise(self, *args):
"""
Register a new NoMatch object if the input is consumed
beyond the last NoMatch and raise the last NoMatch.
Args:
args: A (rule, position, parser) tuple.
"""
rule, position, parser = args
if self.nm is None or not parser.in_parse_comments:
if self.nm is None or position > self.nm.position:
if self.in_not:
self.nm = NoMatch([Parser.FIRST_NOT], position, parser)
else:
self.nm = NoMatch([rule], position, parser)
elif position == self.nm.position and isinstance(rule, Match) \
and not self.in_not:
self.nm.rules.append(rule)
raise self.nm
| (self, *args) |
129 | arpeggio | context |
Returns current context substring, i.e. the substring around current
position.
Args:
length(int): If given, that many chars from the current position
are marked with asterisks.
position(int): The position in the input stream.
| def context(self, length=None, position=None):
"""
Returns current context substring, i.e. the substring around current
position.
Args:
length(int): If given, that many chars from the current position
are marked with asterisks.
position(int): The position in the input stream.
"""
if not position:
position = self.position
if length:
retval = "{}*{}*{}".format(
text(self.input[max(position - 10, 0):position]),
text(self.input[position:position + length]),
text(self.input[position + length:position + 10]))
else:
retval = "{}*{}".format(
text(self.input[max(position - 10, 0):position]),
text(self.input[position:position + 10]))
return retval.replace('\n', ' ').replace('\r', '')
| (self, length=None, position=None) |
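A sketch of context() output, assuming NoMatch keeps a reference to the parser (grammar illustrative; exact surrounding text depends on the input):

from arpeggio import ParserPython, NoMatch
from arpeggio import RegExMatch as _

def number():   return _(r'\d+')
def addition(): return number, "+", number

parser = ParserPython(addition)
try:
    parser.parse("3 + x")
except NoMatch as e:
    # Ten chars of context on each side, '*' marking the failure position
    print(e.parser.context(position=e.position))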
131 | arpeggio | getASG |
Creates Abstract Semantic Graph (ASG) from the parse tree.
Args:
sem_actions (dict): The semantic actions dictionary to use for
semantic analysis. Rule names are the keys and semantic action
objects are values.
defaults (bool): If True a default semantic action will be
applied in case no action is defined for the node.
| def getASG(self, sem_actions=None, defaults=True):
"""
Creates Abstract Semantic Graph (ASG) from the parse tree.
Args:
sem_actions (dict): The semantic actions dictionary to use for
semantic analysis. Rule names are the keys and semantic action
objects are values.
defaults (bool): If True a default semantic action will be
applied in case no action is defined for the node.
"""
if not self.parse_tree:
raise Exception(
"Parse tree is empty. You did call parse(), didn't you?")
if sem_actions is None:
if not self.sem_actions:
raise Exception("Semantic actions not defined.")
else:
sem_actions = self.sem_actions
if type(sem_actions) is not dict:
raise Exception("Semantic actions parameter must be a dictionary.")
for_second_pass = []
def tree_walk(node):
"""
Walks the parse tree calling first_pass for every registered
semantic action and building a list of objects that need to be
processed in the second pass.
"""
if self.debug:
self.dprint(
"Walking down %s type: %s str: %s" %
(node.name, type(node).__name__, text(node)))
children = SemanticActionResults()
if isinstance(node, NonTerminal):
for n in node:
child = tree_walk(n)
if child is not None:
children.append_result(n.rule_name, child)
if self.debug:
self.dprint("Processing %s = '%s' type:%s len:%d" %
(node.name, text(node), type(node).__name__,
len(node) if isinstance(node, list) else 0))
for i, a in enumerate(children):
self.dprint(" %d:%s type:%s" %
(i+1, text(a), type(a).__name__))
if node.rule_name in sem_actions:
sem_action = sem_actions[node.rule_name]
if isinstance(sem_action, types.FunctionType):
retval = sem_action(self, node, children)
else:
retval = sem_action.first_pass(self, node, children)
if hasattr(sem_action, "second_pass"):
for_second_pass.append((node.rule_name, retval))
if self.debug:
action_name = sem_action.__name__ \
if hasattr(sem_action, '__name__') \
else sem_action.__class__.__name__
self.dprint(" Applying semantic action %s" % action_name)
else:
if defaults:
# If no action is registered for this rule use a sane default
if self.debug:
self.dprint(" Applying default semantic action.")
retval = SemanticAction().first_pass(self, node, children)
else:
retval = node
if self.debug:
if retval is None:
self.dprint(" Suppressed.")
else:
self.dprint(" Resolved to = %s type:%s" %
(text(retval), type(retval).__name__))
return retval
if self.debug:
self.dprint("ASG: First pass")
asg = tree_walk(self.parse_tree)
# Second pass
if self.debug:
self.dprint("ASG: Second pass")
for sa_name, asg_node in for_second_pass:
sem_actions[sa_name].second_pass(self, asg_node)
return asg
| (self, sem_actions=None, defaults=True) |
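A sketch of getASG() with a plain-function semantic action keyed by rule name (grammar illustrative):

from arpeggio import ParserPython
from arpeggio import RegExMatch as _

def number():   return _(r'\d+')
def addition(): return number, "+", number

def to_int(parser, node, children):
    return int(str(node))

parser = ParserPython(addition)
parser.parse("3 + 4")
# 'number' nodes become ints; other nodes get the default semantic action
asg = parser.getASG(sem_actions={"number": to_int})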
132 | arpeggio | parse |
Parses input and produces parse tree.
Args:
_input(str): An input string to parse.
file_name(str): If input is loaded from file this can be
set to file name. It is used in error messages.
| def parse(self, _input, file_name=None):
"""
Parses input and produces parse tree.
Args:
_input(str): An input string to parse.
file_name(str): If input is loaded from file this can be
set to file name. It is used in error messages.
"""
self.position = 0 # Input position
self.nm = None # Last NoMatch exception
self.line_ends = []
self.input = _input
self.file_name = file_name
self.comment_positions = {}
self.cache_hits = 0
self.cache_misses = 0
try:
self.parse_tree = self._parse()
except NoMatch as e:
# Remove Not marker
if e.rules[0] is Parser.FIRST_NOT:
del e.rules[0]
# Get line and column from position
e.line, e.col = self.pos_to_linecol(e.position)
raise
finally:
# At end of parsing clear all memoization caches.
# Do this here to free memory.
if self.memoization:
self._clear_caches()
# In debug mode export parse tree to dot file for
# visualization
if self.debug and self.parse_tree:
from arpeggio.export import PTDOTExporter
root_rule_name = self.parse_tree.rule_name
PTDOTExporter().exportFile(
self.parse_tree, "{}_parse_tree.dot".format(root_rule_name))
return self.parse_tree
| (self, _input, file_name=None) |
133 | arpeggio | parse_file |
Parses content from the given file.
Args:
file_name(str): A file name.
| def parse_file(self, file_name):
"""
Parses content from the given file.
Args:
file_name(str): A file name.
"""
with codecs.open(file_name, 'r', 'utf-8') as f:
content = f.read()
return self.parse(content, file_name=file_name)
| (self, file_name) |
134 | arpeggio | pos_to_linecol |
Calculate (line, column) tuple for the given position in the stream.
| def pos_to_linecol(self, pos):
"""
Calculate (line, column) tuple for the given position in the stream.
"""
if not self.line_ends:
try:
# TODO: Check this implementation on Windows.
self.line_ends.append(self.input.index("\n"))
while True:
try:
self.line_ends.append(
self.input.index("\n", self.line_ends[-1] + 1))
except ValueError:
break
except ValueError:
pass
line = bisect.bisect_left(self.line_ends, pos)
col = pos
if line > 0:
col -= self.line_ends[line - 1]
if self.input[self.line_ends[line - 1]] in '\n\r':
col -= 1
return line + 1, col + 1
| (self, pos) |
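A sketch of the offset-to-(line, column) mapping; line_ends is built lazily on the first call:

from arpeggio import ParserPython
from arpeggio import RegExMatch as _

def doc(): return _(r'(?s).*')   # match the whole input

parser = ParserPython(doc)
parser.parse("one\ntwo")
print(parser.pos_to_linecol(0))   # (1, 1)
print(parser.pos_to_linecol(4))   # (2, 1) -- first char of the second line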
135 | arpeggio | ParserPython | null | class ParserPython(Parser):
def __init__(self, language_def, comment_def=None, syntax_classes=None,
*args, **kwargs):
"""
Constructs parser from python statements and expressions.
Args:
language_def (python function): A python function that defines
the root rule of the grammar.
comment_def (python function): A python function that defines
the root rule of the comments grammar.
syntax_classes (dict): Overrides of special syntax parser
expression classes (StrMatch, Sequence, OrderedChoice).
"""
super(ParserPython, self).__init__(*args, **kwargs)
self.syntax_classes = syntax_classes if syntax_classes else {}
# PEG Abstract Syntax Graph
self.parser_model = self._from_python(language_def)
self.comments_model = None
if comment_def:
self.comments_model = self._from_python(comment_def)
self.comments_model.root = True
self.comments_model.rule_name = comment_def.__name__
# In debug mode export parser model to dot for
# visualization
if self.debug:
from arpeggio.export import PMDOTExporter
root_rule = language_def.__name__
PMDOTExporter().exportFile(self.parser_model,
"{}_parser_model.dot".format(root_rule))
def _parse(self):
return self.parser_model.parse(self)
def _from_python(self, expression):
"""
Create parser model from the definition given in the form of python
functions returning lists, tuples, callables, strings and
ParsingExpression objects.
Returns:
Parser Model (PEG Abstract Semantic Graph)
"""
__rule_cache = {"EndOfFile": EndOfFile()}
__for_resolving = [] # Expressions that need crossref resolving
self.__cross_refs = 0
_StrMatch = self.syntax_classes.get('StrMatch', StrMatch)
_OrderedChoice = self.syntax_classes.get('OrderedChoice',
OrderedChoice)
_Sequence = self.syntax_classes.get('Sequence', Sequence)
def inner_from_python(expression):
retval = None
if isinstance(expression, types.FunctionType):
# If this expression is a parser rule
rule_name = expression.__name__
if rule_name in __rule_cache:
c_rule = __rule_cache.get(rule_name)
if self.debug:
self.dprint("Rule {} founded in cache."
.format(rule_name))
if isinstance(c_rule, CrossRef):
self.__cross_refs += 1
if self.debug:
self.dprint("CrossRef usage: {}"
.format(c_rule.target_rule_name))
return c_rule
# Semantic action for the rule
if hasattr(expression, "sem"):
self.sem_actions[rule_name] = expression.sem
# Register rule cross-ref to support recursion
__rule_cache[rule_name] = CrossRef(rule_name)
curr_expr = expression
while isinstance(curr_expr, types.FunctionType):
# If a function directly returns another function,
# recurse until a non-function is returned.
curr_expr = curr_expr()
retval = inner_from_python(curr_expr)
retval.rule_name = rule_name
retval.root = True
# Update cache
__rule_cache[rule_name] = retval
if self.debug:
self.dprint("New rule: {} -> {}"
.format(rule_name, retval.__class__.__name__))
elif type(expression) is text or isinstance(expression, _StrMatch):
if type(expression) is text:
retval = _StrMatch(expression,
ignore_case=self.ignore_case)
else:
retval = expression
if expression.ignore_case is None:
expression.ignore_case = self.ignore_case
if self.autokwd:
to_match = retval.to_match
match = self.keyword_regex.match(to_match)
if match and match.span() == (0, len(to_match)):
retval = RegExMatch(r'{}\b'.format(to_match),
ignore_case=self.ignore_case,
str_repr=to_match)
retval.compile()
elif isinstance(expression, RegExMatch):
# Regular expressions are not compiled yet so that
# global settings can propagate from the parser.
if expression.ignore_case is None:
expression.ignore_case = self.ignore_case
expression.compile()
retval = expression
elif isinstance(expression, Match):
retval = expression
elif isinstance(expression, UnorderedGroup):
retval = expression
for n in retval.elements:
retval.nodes.append(inner_from_python(n))
if any((isinstance(x, CrossRef) for x in retval.nodes)):
__for_resolving.append(retval)
elif isinstance(expression, _Sequence) or \
isinstance(expression, Repetition) or \
isinstance(expression, SyntaxPredicate) or \
isinstance(expression, Decorator):
retval = expression
retval.nodes.append(inner_from_python(retval.elements))
if any((isinstance(x, CrossRef) for x in retval.nodes)):
__for_resolving.append(retval)
elif type(expression) in [list, tuple]:
if type(expression) is list:
retval = _OrderedChoice(expression)
else:
retval = _Sequence(expression)
retval.nodes = [inner_from_python(e) for e in expression]
if any((isinstance(x, CrossRef) for x in retval.nodes)):
__for_resolving.append(retval)
else:
raise GrammarError("Unrecognized grammar element '%s'." %
text(expression))
# Translate separator expression.
if isinstance(expression, Repetition) and expression.sep:
expression.sep = inner_from_python(expression.sep)
return retval
# Cross-ref resolving
def resolve():
for e in __for_resolving:
for i, node in enumerate(e.nodes):
if isinstance(node, CrossRef):
self.__cross_refs -= 1
e.nodes[i] = __rule_cache[node.target_rule_name]
parser_model = inner_from_python(expression)
resolve()
assert self.__cross_refs == 0, "Not all crossrefs are resolved!"
return parser_model
def errors(self):
pass
| (language_def, comment_def=None, syntax_classes=None, *args, **kwargs) |
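A condensed version of the canonical arpeggio calculator example, showing how grammar rules are plain Python functions handed to ParserPython:

from arpeggio import ParserPython, ZeroOrMore, EOF
from arpeggio import RegExMatch as _

def number():     return _(r'\d*\.\d+|\d+')
def factor():     return number
def expression(): return factor, ZeroOrMore(["+", "-"], factor)
def calc():       return expression, EOF

parser = ParserPython(calc)            # language_def = the root rule function
parse_tree = parser.parse("2 + 3.1 - 1")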
136 | arpeggio | __init__ |
Constructs parser from python statements and expressions.
Args:
language_def (python function): A python function that defines
the root rule of the grammar.
comment_def (python function): A python function that defines
the root rule of the comments grammar.
syntax_classes (dict): Overrides of special syntax parser
expression classes (StrMatch, Sequence, OrderedChoice).
| def __init__(self, language_def, comment_def=None, syntax_classes=None,
*args, **kwargs):
"""
Constructs parser from python statements and expressions.
Args:
language_def (python function): A python function that defines
the root rule of the grammar.
comment_def (python function): A python function that defines
the root rule of the comments grammar.
syntax_classes (dict): Overrides of special syntax parser
expression classes (StrMatch, Sequence, OrderedChoice).
"""
super(ParserPython, self).__init__(*args, **kwargs)
self.syntax_classes = syntax_classes if syntax_classes else {}
# PEG Abstract Syntax Graph
self.parser_model = self._from_python(language_def)
self.comments_model = None
if comment_def:
self.comments_model = self._from_python(comment_def)
self.comments_model.root = True
self.comments_model.rule_name = comment_def.__name__
# In debug mode export parser model to dot for
# visualization
if self.debug:
from arpeggio.export import PMDOTExporter
root_rule = language_def.__name__
PMDOTExporter().exportFile(self.parser_model,
"{}_parser_model.dot".format(root_rule))
| (self, language_def, comment_def=None, syntax_classes=None, *args, **kwargs) |
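A sketch of attaching a comment grammar through comment_def; comments are matched and collected but kept out of the main parse tree:

from arpeggio import ParserPython
from arpeggio import RegExMatch as _

def comment(): return _(r'//.*')
def root():    return "a", "b"

parser = ParserPython(root, comment_def=comment)
parser.parse("a // this comment is skipped\nb")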
138 | arpeggio | _from_python |
Create parser model from the definition given in the form of python
functions returning lists, tuples, callables, strings and
ParsingExpression objects.
Returns:
Parser Model (PEG Abstract Semantic Graph)
| def _from_python(self, expression):
"""
Create parser model from the definition given in the form of python
functions returning lists, tuples, callables, strings and
ParsingExpression objects.
Returns:
Parser Model (PEG Abstract Semantic Graph)
"""
__rule_cache = {"EndOfFile": EndOfFile()}
__for_resolving = [] # Expressions that need crossref resolving
self.__cross_refs = 0
_StrMatch = self.syntax_classes.get('StrMatch', StrMatch)
_OrderedChoice = self.syntax_classes.get('OrderedChoice',
OrderedChoice)
_Sequence = self.syntax_classes.get('Sequence', Sequence)
def inner_from_python(expression):
retval = None
if isinstance(expression, types.FunctionType):
# If this expression is a parser rule
rule_name = expression.__name__
if rule_name in __rule_cache:
c_rule = __rule_cache.get(rule_name)
if self.debug:
self.dprint("Rule {} founded in cache."
.format(rule_name))
if isinstance(c_rule, CrossRef):
self.__cross_refs += 1
if self.debug:
self.dprint("CrossRef usage: {}"
.format(c_rule.target_rule_name))
return c_rule
# Semantic action for the rule
if hasattr(expression, "sem"):
self.sem_actions[rule_name] = expression.sem
# Register rule cross-ref to support recursion
__rule_cache[rule_name] = CrossRef(rule_name)
curr_expr = expression
while isinstance(curr_expr, types.FunctionType):
# If a function directly returns another function,
# recurse until a non-function is returned.
curr_expr = curr_expr()
retval = inner_from_python(curr_expr)
retval.rule_name = rule_name
retval.root = True
# Update cache
__rule_cache[rule_name] = retval
if self.debug:
self.dprint("New rule: {} -> {}"
.format(rule_name, retval.__class__.__name__))
elif type(expression) is text or isinstance(expression, _StrMatch):
if type(expression) is text:
retval = _StrMatch(expression,
ignore_case=self.ignore_case)
else:
retval = expression
if expression.ignore_case is None:
expression.ignore_case = self.ignore_case
if self.autokwd:
to_match = retval.to_match
match = self.keyword_regex.match(to_match)
if match and match.span() == (0, len(to_match)):
retval = RegExMatch(r'{}\b'.format(to_match),
ignore_case=self.ignore_case,
str_repr=to_match)
retval.compile()
elif isinstance(expression, RegExMatch):
# Regular expressions are not compiled yet so that
# global settings can propagate from the parser.
if expression.ignore_case is None:
expression.ignore_case = self.ignore_case
expression.compile()
retval = expression
elif isinstance(expression, Match):
retval = expression
elif isinstance(expression, UnorderedGroup):
retval = expression
for n in retval.elements:
retval.nodes.append(inner_from_python(n))
if any((isinstance(x, CrossRef) for x in retval.nodes)):
__for_resolving.append(retval)
elif isinstance(expression, _Sequence) or \
isinstance(expression, Repetition) or \
isinstance(expression, SyntaxPredicate) or \
isinstance(expression, Decorator):
retval = expression
retval.nodes.append(inner_from_python(retval.elements))
if any((isinstance(x, CrossRef) for x in retval.nodes)):
__for_resolving.append(retval)
elif type(expression) in [list, tuple]:
if type(expression) is list:
retval = _OrderedChoice(expression)
else:
retval = _Sequence(expression)
retval.nodes = [inner_from_python(e) for e in expression]
if any((isinstance(x, CrossRef) for x in retval.nodes)):
__for_resolving.append(retval)
else:
raise GrammarError("Unrecognized grammar element '%s'." %
text(expression))
# Translate separator expression.
if isinstance(expression, Repetition) and expression.sep:
expression.sep = inner_from_python(expression.sep)
return retval
# Cross-ref resolving
def resolve():
for e in __for_resolving:
for i, node in enumerate(e.nodes):
if isinstance(node, CrossRef):
self.__cross_refs -= 1
e.nodes[i] = __rule_cache[node.target_rule_name]
parser_model = inner_from_python(expression)
resolve()
assert self.__cross_refs == 0, "Not all crossrefs are resolved!"
return parser_model
| (self, expression) |
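A sketch of what inner_from_python does with each Python construct: strings become StrMatch, lists OrderedChoice, tuples Sequence, and a recursive rule reference is held as a CrossRef until resolve() patches it:

from arpeggio import ParserPython, Optional

def item():  return ["a", "b"]                  # list  -> OrderedChoice
def items(): return item, Optional(",", items)  # tuple -> Sequence; the
                                                # self-reference becomes a
                                                # CrossRef, patched in resolve()

parser = ParserPython(items)
parser.parse("a, b, a")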
140 | arpeggio | _parse | null | def _parse(self):
return self.parser_model.parse(self)
| (self) |
143 | arpeggio | errors | null | def errors(self):
pass
| (self) |