code (stringlengths, 23 to 981k) | language (stringclasses, 2 values) | AST_depth (int64, -1 to 40) | alphanumeric_fraction (float64, 0 to 1) | max_line_length (int64, 0 to 632k) | avg_line_length (float64, 0 to 15.4k) | num_lines (int64, 0 to 3.86k) | original_docstring (stringlengths, 7 to 42.9k) | source (stringclasses, 1 value) |
---|---|---|---|---|---|---|---|---|
class WeightedGraph:
"""
This class represents a weighted graph for the purposes
of determining clusters via the Markov Clustering Algorithm.
To initialize an object of this class, pass in a dictionary
which maps pairs (tuples) of vertices to the corresponding weight.
Stores internally both an adjacency list and an adjacency matrix
This is fine as the number of expected vertices is small.
"""
def __init__(self, pair_weights):
self.adjacency_list = self._construct_adjacency_list(pair_weights)
self.vertices = list(self.adjacency_list.keys())
self.num_vertices = len(self.vertices)
self.adjacency_matrix = self._construct_adjacency_matrix()
def get_clusters(self, granularity):
"""
This method uses the Markov Clustering Algorithm
to cluster vertices together.
Args:
granularity: The granularity with which to inflate columns
Return:
A dictionary which maps a vertex to the set of vertices it is in a cluster with
"""
# The expansion parameter is hardcoded; this reflects the original implementation
# We may wish to make this configurable
e = 2
matrix = transform_matrix(self.adjacency_matrix)
matrix = normalize_columns(matrix)
error_convergence = np.linalg.norm(matrix)
while error_convergence > 10E-6:
# Store previous matrix
previous_matrix = matrix
matrix = np.linalg.matrix_power(matrix, e)
matrix = inflate_columns(matrix, granularity)
error_convergence = np.linalg.norm(matrix - previous_matrix)
return self._get_clusters(matrix)
def _get_clusters(self, matrix):
"""
Helper function to retrieve the list of clusters from the matrix
"""
# clusters is a set to have only unique sets in the partition of the vertices
clusters = set()
for i, v1 in enumerate(self.vertices):
# Already assigned a cluster
if np.sum(matrix[i, :]) < 10E-6: # If sum of row is essentially zero
continue
else:
cluster = []
for j, v2 in enumerate(self.vertices):
if matrix[i, j] > 10E-6:
cluster.append(v2)
clusters.add(frozenset(cluster))
clusters = [list(cluster) for cluster in clusters]
return clusters
def _construct_adjacency_list(self, pair_weights):
"""
Constructs an adjacency list representation of the graph as
a dictionary which maps vertices to a list of tuples (v, w) where
v is the adjacent vertex and w is the weight of the edge.
Args:
pair_weights: A dictionary mapping pairs of vertices to weights
Returns:
An adjacency list
"""
adjacency_list = {}
for v1, v2 in pair_weights:
weight = pair_weights[(v1, v2)]
if v1 in adjacency_list:
adjacency_list[v1].append((v2, weight))
else:
adjacency_list[v1] = [(v2, weight)]
if v2 in adjacency_list:
adjacency_list[v2].append((v1, weight))
else:
adjacency_list[v2] = [(v1, weight)]
return adjacency_list
def _construct_adjacency_matrix(self):
"""
Constructs an adjacency matrix from the internally stored adjacency list
Assigns M_ij to be the weight from vertex i to vertex j.
Returns:
The numpy matrix storing the weights
"""
adjacency_matrix = np.identity(self.num_vertices)
for i, v1 in enumerate(self.vertices):
for j, v2 in enumerate(self.vertices):
v1_v2_weight = 0
for vertex, weight in self.adjacency_list[v1]:
if v2 == vertex:
v1_v2_weight = weight
break
adjacency_matrix[i][j] = v1_v2_weight
return adjacency_matrix | python | 17 | 0.59216 | 91 | 37.392523 | 107 |
This class represents a weighted graph for the purposes
of determining clusters via the Markov Clustering Algorithm.
To initialize an object of this class, pass in a dictionary
which maps pairs (tuples) of vertices to the corresponding weight.
Stores internally both an adjacency list and an adjacency matrix
This is fine as the number of expected vertices is small.
| class |
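Note on the WeightedGraph row above: a self-contained sketch of the normalize/expand/inflate loop that get_clusters relies on. This is not part of the dataset; the helper definitions and the toy weight matrix below are assumptions standing in for the module's transform_matrix, normalize_columns and inflate_columns, which are not shown in this row.

```python
# Sketch only: hypothetical helpers standing in for the module's own
# normalize_columns / inflate_columns, applied to a made-up weight matrix.
import numpy as np

def normalize_columns(m):
    # Make each column sum to 1 so the matrix reads as column-stochastic.
    return m / m.sum(axis=0, keepdims=True)

def inflate_columns(m, granularity):
    # Element-wise power followed by re-normalization (MCL "inflation").
    return normalize_columns(np.power(m, granularity))

adjacency = np.array([[1.0, 0.8, 0.0],
                      [0.8, 1.0, 0.1],
                      [0.0, 0.1, 1.0]])  # toy symmetric weights with self-loops
matrix = normalize_columns(adjacency)
for _ in range(100):
    previous_matrix = matrix
    matrix = np.linalg.matrix_power(matrix, 2)       # expansion (e = 2)
    matrix = inflate_columns(matrix, granularity=2)  # inflation
    if np.linalg.norm(matrix - previous_matrix) < 1e-6:
        break
print(np.round(matrix, 3))  # rows with non-zero mass mark cluster attractors
```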
class Zone:
"""
Zone with defined boundaries
"""
def topLeft(self):
"""
:rtype: (int, int)
"""
raise NotImplementedError()
def bottomRight(self):
"""
:rtype: (int, int)
"""
raise NotImplementedError()
def center(self):
"""
:rtype: (int, int)
"""
raise NotImplementedError() | python | 8 | 0.464646 | 35 | 17.045455 | 22 |
Zone with defined boundaries
| class |
class GameObject:
"""
The base class for all other classes.
"""
MENU_EVENT = pg.USEREVENT + 1
SCENE_EVENT = pg.USEREVENT + 2
CUTSCENE_EVENT = pg.USEREVENT + 3
CATEGORIES_MENU = [
"screen",
"transition",
"complete",
"health",
"max_health"
]
CATEGORIES_SCENE = [
"screen",
"transition",
"complete",
"pause",
"unpause",
"no_mode",
"start_game",
"switch",
"door",
"death",
"revive"
]
CATEGORIES_CUTSCENE = [
"screen",
"transition"
]
def handleEvent(self, event):
"""
Handles the given event.
:param event: pygame.Event, allowing event-driven programming.
"""
raise NotImplementedError
def update(self):
"""
Updates the logic of the game object every game tick.
"""
raise NotImplementedError
def draw(self, camera=None):
"""
Renders the game object to the screen every game tick.
"""
raise NotImplementedError
def messageMenu(self, category, data=None):
"""
Creates an event that is posted for the menu engine.
:param category: String, the category of the message.
:param data: N-Tuple, containing the data for the relevant category.
"""
self._messageEngine(GameObject.CATEGORIES_MENU,
GameObject.MENU_EVENT,
self.__str__(),
category,
data)
def messageScene(self, category, data=None):
"""
Creates an event that is posted for the scene engine.
:param category: String, the category of the message.
:param data: N-Tuple, containing the data for the relevant category.
"""
self._messageEngine(GameObject.CATEGORIES_SCENE,
GameObject.SCENE_EVENT,
self.__str__(),
category,
data)
def messageCutScene(self, category, data=None):
"""
Creates an event that is posted for the cutscene engine.
:param category: String, the category of the message.
:param data: N-Tuple, containing the data for the relevant category.
"""
self._messageEngine(GameObject.CATEGORIES_CUTSCENE,
GameObject.CUTSCENE_EVENT,
self.__str__(),
category,
data)
def _messageEngine(self, CATEGORIES, EVENT, sender, category, data=None):
"""
Creates an event that is posted to an engine.
:param CATEGORIES: List, containing strings of valid categories.
:param EVENT: pygame.event, the event that the engine handles.
:param sender: String, the sender of the message.
:param category: String, the category of the message.
:param data: N-Tuple, containing the data for the relevant category.
"""
if category not in CATEGORIES:
raise KeyError("'{}' is an invalid category! The categories allowed "
"are {}!".format(category, CATEGORIES))
contents = \
{
"sender": sender,
"category": category,
"data": data
}
message = pg.event.Event(EVENT, contents)
pg.event.post(message) | python | 14 | 0.53008 | 81 | 29.837607 | 117 |
The base class for all other classes.
| class |
class Stream:
"""Represents a single HTTP/2 Stream.
Stream is a bidirectional flow of bytes within an established connection,
which may carry one or more messages. Handles the transfer of HTTP Headers
and Data frames.
Role of this class is to
1. Combine all the data frames
"""
def __init__(
self,
stream_id: int,
request: Request,
protocol: "H2ClientProtocol",
download_maxsize: int = 0,
download_warnsize: int = 0,
) -> None:
"""
Arguments:
stream_id -- Unique identifier for the stream within a single HTTP/2 connection
request -- The HTTP request associated to the stream
protocol -- Parent H2ClientProtocol instance
"""
self.stream_id: int = stream_id
self._request: Request = request
self._protocol: "H2ClientProtocol" = protocol
self._download_maxsize = self._request.meta.get('download_maxsize', download_maxsize)
self._download_warnsize = self._request.meta.get('download_warnsize', download_warnsize)
# Metadata of an HTTP/2 connection stream
# initialized when stream is instantiated
self.metadata: Dict = {
'request_content_length': 0 if self._request.body is None else len(self._request.body),
# Flag to keep track whether the stream has initiated the request
'request_sent': False,
# Flag to track whether we have logged about exceeding download warnsize
'reached_warnsize': False,
# Each time we send a data frame, we will decrease value by the amount send.
'remaining_content_length': 0 if self._request.body is None else len(self._request.body),
# Flag to keep track whether client (self) have closed this stream
'stream_closed_local': False,
# Flag to keep track whether the server has closed the stream
'stream_closed_server': False,
}
# Private variable used to build the response
# this response is then converted to appropriate Response class
# passed to the response deferred callback
self._response: Dict = {
# Data received frame by frame from the server is appended
# and passed to the response Deferred when completely received.
'body': BytesIO(),
# The amount of data received that counts against the
# flow control window
'flow_controlled_size': 0,
# Headers received after sending the request
'headers': Headers({}),
}
def _cancel(_) -> None:
# Close this stream as gracefully as possible
# If the associated request is initiated we reset this stream
# else we directly call close() method
if self.metadata['request_sent']:
self.reset_stream(StreamCloseReason.CANCELLED)
else:
self.close(StreamCloseReason.CANCELLED)
self._deferred_response = Deferred(_cancel)
def __repr__(self):
return f'Stream(id={self.stream_id!r})'
@property
def _log_warnsize(self) -> bool:
"""Checks if we have received data which exceeds the download warnsize
and whether we have not already logged about it.
Returns:
True if both the above conditions hold true
False if any of the conditions is false
"""
content_length_header = int(self._response['headers'].get(b'Content-Length', -1))
return (
self._download_warnsize
and (
self._response['flow_controlled_size'] > self._download_warnsize
or content_length_header > self._download_warnsize
)
and not self.metadata['reached_warnsize']
)
def get_response(self) -> Deferred:
"""Simply return a Deferred which fires when response
from the asynchronous request is available
"""
return self._deferred_response
def check_request_url(self) -> bool:
# Make sure that we are sending the request to the correct URL
url = urlparse(self._request.url)
return (
url.netloc == str(self._protocol.metadata['uri'].host, 'utf-8')
or url.netloc == str(self._protocol.metadata['uri'].netloc, 'utf-8')
or url.netloc == f'{self._protocol.metadata["ip_address"]}:{self._protocol.metadata["uri"].port}'
)
def _get_request_headers(self) -> List[Tuple[str, str]]:
url = urlparse(self._request.url)
path = url.path
if url.query:
path += '?' + url.query
# This pseudo-header field MUST NOT be empty for "http" or "https"
# URIs; "http" or "https" URIs that do not contain a path component
# MUST include a value of '/'. The exception to this rule is an
# OPTIONS request for an "http" or "https" URI that does not include
# a path component; these MUST include a ":path" pseudo-header field
# with a value of '*' (refer RFC 7540 - Section 8.1.2.3)
if not path:
path = '*' if self._request.method == 'OPTIONS' else '/'
# Make sure pseudo-headers come before all the other headers
headers = [
(':method', self._request.method),
(':authority', url.netloc),
]
# The ":scheme" and ":path" pseudo-header fields MUST
# be omitted for CONNECT method (refer RFC 7540 - Section 8.3)
if self._request.method != 'CONNECT':
headers += [
(':scheme', self._protocol.metadata['uri'].scheme),
(':path', path),
]
content_length = str(len(self._request.body))
headers.append(('Content-Length', content_length))
content_length_name = self._request.headers.normkey(b'Content-Length')
for name, values in self._request.headers.items():
for value in values:
value = str(value, 'utf-8')
if name == content_length_name:
if value != content_length:
logger.warning(
'Ignoring bad Content-Length header %r of request %r, '
'sending %r instead',
value,
self._request,
content_length,
)
continue
headers.append((str(name, 'utf-8'), value))
return headers
def initiate_request(self) -> None:
if self.check_request_url():
headers = self._get_request_headers()
self._protocol.conn.send_headers(self.stream_id, headers, end_stream=False)
self.metadata['request_sent'] = True
self.send_data()
else:
# Close this stream calling the response errback
# Note that we have not sent any headers
self.close(StreamCloseReason.INVALID_HOSTNAME)
def send_data(self) -> None:
"""Called immediately after the headers are sent. Here we send all the
data as part of the request.
If the content length is 0 initially then we end the stream immediately and
wait for response data.
Warning: Only call this method when the stream is not closed from the client side
and the request has already been initiated by sending the HEADERS frame. If not,
the stream will raise a ProtocolError (raised by the h2 state machine).
"""
if self.metadata['stream_closed_local']:
raise StreamClosedError(self.stream_id)
# Firstly, check what the flow control window is for current stream.
window_size = self._protocol.conn.local_flow_control_window(stream_id=self.stream_id)
# Next, check what the maximum frame size is.
max_frame_size = self._protocol.conn.max_outbound_frame_size
# We will send no more than the window size or the remaining file size
# of data in this call, whichever is smaller.
bytes_to_send_size = min(window_size, self.metadata['remaining_content_length'])
# We now need to send a number of data frames.
while bytes_to_send_size > 0:
chunk_size = min(bytes_to_send_size, max_frame_size)
data_chunk_start_id = self.metadata['request_content_length'] - self.metadata['remaining_content_length']
data_chunk = self._request.body[data_chunk_start_id:data_chunk_start_id + chunk_size]
self._protocol.conn.send_data(self.stream_id, data_chunk, end_stream=False)
bytes_to_send_size = bytes_to_send_size - chunk_size
self.metadata['remaining_content_length'] = self.metadata['remaining_content_length'] - chunk_size
self.metadata['remaining_content_length'] = max(0, self.metadata['remaining_content_length'])
# End the stream if no more data needs to be sent
if self.metadata['remaining_content_length'] == 0:
self._protocol.conn.end_stream(self.stream_id)
# Q. What about the rest of the data?
# Ans: Remaining Data frames will be sent when we get a WindowUpdate frame
def receive_window_update(self) -> None:
"""Flow control window size was changed.
Send data that earlier could not be sent as we were
blocked behind the flow control.
"""
if (
self.metadata['remaining_content_length']
and not self.metadata['stream_closed_server']
and self.metadata['request_sent']
):
self.send_data()
def receive_data(self, data: bytes, flow_controlled_length: int) -> None:
self._response['body'].write(data)
self._response['flow_controlled_size'] += flow_controlled_length
# We check maxsize here in case the Content-Length header was not received
if self._download_maxsize and self._response['flow_controlled_size'] > self._download_maxsize:
self.reset_stream(StreamCloseReason.MAXSIZE_EXCEEDED)
return
if self._log_warnsize:
self.metadata['reached_warnsize'] = True
warning_msg = (
f'Received more ({self._response["flow_controlled_size"]}) bytes than download '
f'warn size ({self._download_warnsize}) in request {self._request}'
)
logger.warning(warning_msg)
# Acknowledge the data received
self._protocol.conn.acknowledge_received_data(
self._response['flow_controlled_size'],
self.stream_id
)
def receive_headers(self, headers: List[HeaderTuple]) -> None:
for name, value in headers:
self._response['headers'][name] = value
# Check if we exceed the allowed max data size which can be received
expected_size = int(self._response['headers'].get(b'Content-Length', -1))
if self._download_maxsize and expected_size > self._download_maxsize:
self.reset_stream(StreamCloseReason.MAXSIZE_EXCEEDED)
return
if self._log_warnsize:
self.metadata['reached_warnsize'] = True
warning_msg = (
f'Expected response size ({expected_size}) larger than '
f'download warn size ({self._download_warnsize}) in request {self._request}'
)
logger.warning(warning_msg)
def reset_stream(self, reason: StreamCloseReason = StreamCloseReason.RESET) -> None:
"""Close this stream by sending a RST_FRAME to the remote peer"""
if self.metadata['stream_closed_local']:
raise StreamClosedError(self.stream_id)
# Clear buffer earlier to avoid keeping data in memory for a long time
self._response['body'].truncate(0)
self.metadata['stream_closed_local'] = True
self._protocol.conn.reset_stream(self.stream_id, ErrorCodes.REFUSED_STREAM)
self.close(reason)
def close(
self,
reason: StreamCloseReason,
errors: Optional[List[BaseException]] = None,
from_protocol: bool = False,
) -> None:
"""Based on the reason sent we will handle each case.
"""
if self.metadata['stream_closed_server']:
raise StreamClosedError(self.stream_id)
if not isinstance(reason, StreamCloseReason):
raise TypeError(f'Expected StreamCloseReason, received {reason.__class__.__qualname__}')
# Have default value of errors as an empty list as
# some cases can add a list of exceptions
errors = errors or []
if not from_protocol:
self._protocol.pop_stream(self.stream_id)
self.metadata['stream_closed_server'] = True
# We do not check for Content-Length or Transfer-Encoding in response headers
# and add `partial` flag as in HTTP/1.1 as 'A request or response that includes
# a payload body can include a content-length header field' (RFC 7540 - Section 8.1.2.6)
# NOTE: Order of handling the events is important here
# As we immediately cancel the request when maxsize is exceeded while
# receiving DATA_FRAME's when we have received the headers (not
# having Content-Length)
if reason is StreamCloseReason.MAXSIZE_EXCEEDED:
expected_size = int(self._response['headers'].get(
b'Content-Length',
self._response['flow_controlled_size'])
)
error_msg = (
f'Cancelling download of {self._request.url}: received response '
f'size ({expected_size}) larger than download max size ({self._download_maxsize})'
)
logger.error(error_msg)
self._deferred_response.errback(CancelledError(error_msg))
elif reason is StreamCloseReason.ENDED:
self._fire_response_deferred()
# Stream was abruptly ended here
elif reason is StreamCloseReason.CANCELLED:
# Client has cancelled the request. Remove all the data
# received and fire the response deferred with no flags set
# NOTE: The data is already flushed in Stream.reset_stream() called
# immediately when the stream needs to be cancelled
# There may be no :status in headers, so we set
# HTTP Status Code: 499 - Client Closed Request
self._response['headers'][':status'] = '499'
self._fire_response_deferred()
elif reason is StreamCloseReason.RESET:
self._deferred_response.errback(ResponseFailed([
Failure(
f'Remote peer {self._protocol.metadata["ip_address"]} sent RST_STREAM',
ProtocolError
)
]))
elif reason is StreamCloseReason.CONNECTION_LOST:
self._deferred_response.errback(ResponseFailed(errors))
elif reason is StreamCloseReason.INACTIVE:
errors.insert(0, InactiveStreamClosed(self._request))
self._deferred_response.errback(ResponseFailed(errors))
else:
assert reason is StreamCloseReason.INVALID_HOSTNAME
self._deferred_response.errback(InvalidHostname(
self._request,
str(self._protocol.metadata['uri'].host, 'utf-8'),
f'{self._protocol.metadata["ip_address"]}:{self._protocol.metadata["uri"].port}'
))
def _fire_response_deferred(self) -> None:
"""Builds response from the self._response dict
and fires the response deferred callback with the
generated response instance"""
body = self._response['body'].getvalue()
response_cls = responsetypes.from_args(
headers=self._response['headers'],
url=self._request.url,
body=body,
)
response = response_cls(
url=self._request.url,
status=int(self._response['headers'][':status']),
headers=self._response['headers'],
body=body,
request=self._request,
certificate=self._protocol.metadata['certificate'],
ip_address=self._protocol.metadata['ip_address'],
protocol='h2',
)
self._deferred_response.callback(response) | python | 21 | 0.602322 | 117 | 40.643038 | 395 | Represents a single HTTP/2 Stream.
Stream is a bidirectional flow of bytes within an established connection,
which may carry one or more messages. Handles the transfer of HTTP Headers
and Data frames.
Role of this class is to
1. Combine all the data frames
| class |
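Note on the Stream row above: send_data is mostly window/chunk arithmetic. The sketch below reproduces only that arithmetic with plain Python and invented numbers; no h2 or Twisted objects are involved.

```python
# Sketch only: how a request body is split into DATA-frame-sized chunks,
# capped by the flow-control window. All sizes here are hypothetical.
body = bytes(70_000)       # request body
window_size = 65_535       # current flow-control window for the stream
max_frame_size = 16_384    # maximum outbound DATA frame payload
remaining_content_length = len(body)

bytes_to_send_size = min(window_size, remaining_content_length)
frames = []
while bytes_to_send_size > 0:
    chunk_size = min(bytes_to_send_size, max_frame_size)
    start = len(body) - remaining_content_length
    frames.append(body[start:start + chunk_size])
    bytes_to_send_size -= chunk_size
    remaining_content_length -= chunk_size

print([len(f) for f in frames])      # [16384, 16384, 16384, 16383]
print(remaining_content_length)      # 4465 bytes wait for a WINDOW_UPDATE frame
```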
class Meta:
"""
Meta class. Getting fields.
"""
model = Chat
fields = ('base_image', ) | python | 7 | 0.436508 | 35 | 20.166667 | 6 |
Meta class. Getting fields.
| class |
class Meta:
"""
Meta class. Getting fields.
"""
model = MessageImages
fields = ('image',) | python | 7 | 0.465116 | 35 | 20.666667 | 6 |
Meta class. Getting fields.
| class |
class ScopeFilterValidator:
"""
The scope filter validator validates whether prefixes, ASNs or RPSL
objects fall within the configured scope filter.
"""
def __init__(self):
self.load_filters()
def load_filters(self):
"""
(Re)load the local cache of the configured filters.
Also called by __init__
"""
prefixes = get_setting('scopefilter.prefixes', [])
self.filtered_prefixes = [IP(prefix) for prefix in prefixes]
self.filtered_asns = set()
self.filtered_asn_ranges = set()
asn_filters = get_setting('scopefilter.asns', [])
for asn_filter in asn_filters:
if '-' in str(asn_filter):
start, end = asn_filter.split('-')
self.filtered_asn_ranges.add((int(start), int(end)))
else:
self.filtered_asns.add(int(asn_filter))
def validate(self, source: str, prefix: Optional[IP]=None, asn: Optional[int]=None) -> ScopeFilterStatus:
"""
Validate a prefix and/or ASN, for a particular source.
Returns a tuple of a ScopeFilterStatus and an explanation string.
"""
if not prefix and asn is None:
raise ValueError('Scope Filter validator must be provided asn or prefix')
if get_setting(f'sources.{source}.scopefilter_excluded'):
return ScopeFilterStatus.in_scope
if prefix:
for filtered_prefix in self.filtered_prefixes:
if prefix.version() == filtered_prefix.version() and filtered_prefix.overlaps(prefix):
return ScopeFilterStatus.out_scope_prefix
if asn is not None:
if asn in self.filtered_asns:
return ScopeFilterStatus.out_scope_as
for range_start, range_end in self.filtered_asn_ranges:
if range_start <= asn <= range_end:
return ScopeFilterStatus.out_scope_as
return ScopeFilterStatus.in_scope
def _validate_rpsl_data(self, source: str, object_class: str, prefix: Optional[IP],
asn_first: Optional[int]) -> Tuple[ScopeFilterStatus, str]:
"""
Validate whether a particular set of RPSL data is in scope.
Depending on object_class, members and mp_members are also validated.
Returns a ScopeFilterStatus.
"""
out_of_scope = [ScopeFilterStatus.out_scope_prefix, ScopeFilterStatus.out_scope_as]
if object_class not in ['route', 'route6']:
return ScopeFilterStatus.in_scope, ''
if prefix:
prefix_state = self.validate(source, prefix)
if prefix_state in out_of_scope:
return prefix_state, f'prefix {prefix} is out of scope'
if asn_first is not None:
asn_state = self.validate(source, asn=asn_first)
if asn_state in out_of_scope:
return asn_state, f'ASN {asn_first} is out of scope'
return ScopeFilterStatus.in_scope, ''
def validate_rpsl_object(self, rpsl_object: RPSLObject) -> Tuple[ScopeFilterStatus, str]:
"""
Validate whether an RPSLObject is in scope.
Returns a tuple of a ScopeFilterStatus and an explanation string.
"""
return self._validate_rpsl_data(
rpsl_object.source(),
rpsl_object.rpsl_object_class,
rpsl_object.prefix,
rpsl_object.asn_first,
)
def validate_all_rpsl_objects(self, database_handler: DatabaseHandler) -> \
Tuple[List[Dict[str, str]], List[Dict[str, str]], List[Dict[str, str]]]:
"""
Apply the scope filter to all relevant objects.
Retrieves all routes from the DB, and aggregates the validation results.
Returns a tuple of three sets:
- one with routes that should be set to status in_scope, but are not now
- one with routes that should be set to status out_scope_as, but are not now
- one with routes that should be set to status out_scope_prefix, but are not now
Each object is recorded as a dict, which has the fields shown
in "columns" below.
Objects where their current status in the DB matches the new
validation result, are not included in the return value.
"""
columns = ['rpsl_pk', 'ip_first', 'prefix_length', 'asn_first', 'source', 'object_class',
'object_text', 'scopefilter_status']
objs_changed: Dict[ScopeFilterStatus, List[Dict[str, str]]] = defaultdict(list)
q = RPSLDatabaseQuery(column_names=columns, enable_ordering=False)
q = q.object_classes(['route', 'route6'])
results = database_handler.execute_query(q)
for result in results:
current_status = result['scopefilter_status']
result['old_status'] = current_status
prefix = None
if result['ip_first']:
prefix = IP(result['ip_first'] + '/' + str(result['prefix_length']))
new_status, _ = self._validate_rpsl_data(
result['source'],
result['object_class'],
prefix,
result['asn_first'],
)
if new_status != current_status:
result['scopefilter_status'] = new_status
objs_changed[new_status].append(result)
return (objs_changed[ScopeFilterStatus.in_scope],
objs_changed[ScopeFilterStatus.out_scope_as],
objs_changed[ScopeFilterStatus.out_scope_prefix]) | python | 18 | 0.60043 | 109 | 42.232558 | 129 |
The scope filter validator validates whether prefixes, ASNs or RPSL
objects fall within the configured scope filter.
| class |
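Note on the ScopeFilterValidator row above: a self-contained sketch of the same prefix/ASN checks using the standard-library ipaddress module in place of the IP class and get_setting(); the filter values are invented.

```python
# Sketch only: stdlib ipaddress stands in for the IP class used above, and the
# filter contents are made up rather than read from configuration.
import ipaddress

filtered_prefixes = [ipaddress.ip_network("192.0.2.0/24")]
filtered_asns = {64496}
filtered_asn_ranges = {(64512, 65534)}   # parsed from a "64512-65534" entry

def scope_status(prefix=None, asn=None):
    if prefix:
        net = ipaddress.ip_network(prefix)
        for filtered in filtered_prefixes:
            if net.version == filtered.version and net.overlaps(filtered):
                return "out_scope_prefix"
    if asn is not None:
        if asn in filtered_asns:
            return "out_scope_as"
        if any(start <= asn <= end for start, end in filtered_asn_ranges):
            return "out_scope_as"
    return "in_scope"

print(scope_status(prefix="192.0.2.128/25"))              # out_scope_prefix
print(scope_status(asn=64512))                            # out_scope_as
print(scope_status(prefix="198.51.100.0/24", asn=64500))  # in_scope
```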
class Client:
""" Represents a client entry """
def __init__(self, cid, cname, public_key, last_seen):
self.ID = bytes.fromhex(cid) # Unique client ID, 16 bytes.
self.Name = cname # Client's name, null terminated ascii string, 255 bytes.
self.PublicKey = public_key # Client's public key, 160 bytes.
self.LastSeen = last_seen # The Date & time of client's last request.
def validate(self):
""" Validate Client attributes according to the requirements """
if not self.ID or len(self.ID) != protocol.CLIENT_ID_SIZE:
return False
if not self.Name or len(self.Name) >= protocol.NAME_SIZE:
return False
if not self.PublicKey or len(self.PublicKey) != protocol.PUBLIC_KEY_SIZE:
return False
if not self.LastSeen:
return False
return True | python | 11 | 0.618016 | 84 | 42.9 | 20 | Represents a client entry | class |
class Message:
""" Represents a message entry """
def __init__(self, to_client, from_client, mtype, content):
self.ID = 0 # Message ID, 4 bytes.
self.ToClient = to_client # Receiver's unique ID, 16 bytes.
self.FromClient = from_client # Sender's unique ID, 16 bytes.
self.Type = mtype # Message type, 1 byte.
self.Content = content # Message's content, Blob.
def validate(self):
""" Validate Message attributes according to the requirements """
if not self.ToClient or len(self.ToClient) != protocol.CLIENT_ID_SIZE:
return False
if not self.FromClient or len(self.FromClient) != protocol.CLIENT_ID_SIZE:
return False
if not self.Type or self.Type > protocol.MSG_TYPE_MAX:
return False
return True | python | 11 | 0.620939 | 82 | 42.789474 | 19 | Represents a message entry | class |
class HER:
"""HER (final strategy).
Attributes:
desired_states (np.ndarray): desired states
reward_func (Callable): returns reward from state, action, next_state
"""
def __init__(self, demo_path: str, reward_func: Callable = default_reward_func):
"""Initialization.
Args:
demo_path (str): path of demonstration including desired states
reward_func (Callable): returns reward from state, action, next_state
"""
self.desired_states, self.demo_goal_indices = fetch_desired_states_from_demo(
demo_path
)
self.reward_func = reward_func
def sample_desired_state(self) -> np.ndarray:
"""Sample one of the desired states."""
return np.random.choice(self.desired_states, 1)[0]
def generate_demo_transitions(self, demo: list) -> list:
"""Return generated demo transitions for HER."""
new_demo: list = list()
# generate demo transitions
prev_idx = 0
for idx in self.demo_goal_indices:
demo_final_state = demo[idx][0]
transitions = [demo[i] for i in range(prev_idx, idx + 1)]
prev_idx = idx + 1
transitions = self.generate_transitions(
transitions, demo_final_state, demo=True
)
new_demo.extend(transitions)
return new_demo
def generate_transitions(
self, transitions: list, desired_state: np.ndarray, demo: bool = False
) -> list:
"""Generate new transitions concatenated with desired states."""
new_transitions = list()
final_state = transitions[-1][0]
for transition in transitions:
# process transitions with the initial goal state
new_transitions.append(self.__get_transition(transition, desired_state))
if not demo:
new_transitions.append(self.__get_transition(transition, final_state))
return new_transitions
def __get_transition(self, transition: tuple, goal_state: np.ndarray):
"""Get a single transition concatenated with a goal state."""
state, action, _, next_state, done = transition
done = np.array_equal(state, goal_state)
reward = self.reward_func(state, action, goal_state)
state = np.concatenate((state, goal_state), axis=-1)
next_state = np.concatenate((next_state, goal_state), axis=-1)
return (state, action, reward, next_state, done) | python | 14 | 0.611266 | 86 | 35.289855 | 69 | HER (final strategy).
Attributes:
desired_states (np.ndarray): desired states
reward_func (Callable): returns reward from state, action, next_state
| class |
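Note on the HER row above: a self-contained sketch of the final-strategy goal relabelling the class performs. The reward function, the transition layout and the toy episode are invented stand-ins, not the dataset's defaults.

```python
# Sketch only: every transition is stored twice, once with the original goal
# and once with the episode's final state as the goal ("final" strategy).
import numpy as np

def reward_func(state, action, goal):
    # Hypothetical sparse reward: 0 when the goal is reached, else -1.
    return 0.0 if np.array_equal(state, goal) else -1.0

def relabel(transitions, goal):
    new = []
    for state, action, _, next_state, _ in transitions:
        done = np.array_equal(state, goal)
        reward = reward_func(state, action, goal)
        new.append((np.concatenate((state, goal)), action, reward,
                    np.concatenate((next_state, goal)), done))
    return new

episode = [(np.array([0.0]), 0, -1.0, np.array([1.0]), False),
           (np.array([1.0]), 1, -1.0, np.array([2.0]), True)]
desired_goal = np.array([5.0])    # original goal, never reached in this episode
final_state = episode[-1][0]      # the "final" strategy relabels with this
augmented = relabel(episode, desired_goal) + relabel(episode, final_state)
print(len(augmented), augmented[-1][2])   # 4 transitions; last reward is 0.0
```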
class HostRemoval:
'''
PCA-decompose a saturated host star PSF and remove it
'''
def __init__(self,
n_PCA,
outdir,
abs_PCA_name,
config_data = config):
'''
INPUTS:
n_PCA: number of principal components to use
outdir: directory to deposit the host-subtracted images in (this has to be
defined at the function call because the images may or may not
contain fake planet PSFs, and I want to keep them separate)
abs_PCA_name: absolute file name of the PCA cube to reconstruct the host star
for making a fake planet (i.e., without saturation effects)
config_data: configuration data, as usual
'''
self.n_PCA = n_PCA
self.outdir = outdir
self.abs_PCA_name = abs_PCA_name
self.config_data = config_data
# read in the PCA vector cube for this series of frames
# (note the PCA needs to correspond to saturated PSFs, since I am subtracting
# saturated PSFs away)
self.pca_basis_cube_sat, self.header_pca_basis_cube_sat = fits.getdata(self.abs_PCA_name, 0, header=True)
##########
def __call__(self,
abs_sci_name):
'''
Reconstruct and inject, for a single frame so as to parallelize the job
INPUTS:
abs_sci_name: the absolute path of the science frame into which we want to inject a planet
'''
print(abs_sci_name)
# read in the cutout science frame
# (there should be no masking of this frame downstream)
sci, header_sci = fits.getdata(abs_sci_name, 0, header=True)
# define the mask of this science frame
## ## fine-tune this step later!
mask_weird = np.ones(np.shape(sci))
no_mask = np.copy(mask_weird) # a non-mask for reconstructing saturated PSFs
#mask_weird[sci > 1e8] = np.nan # mask saturating region
## TEST: WRITE OUT
#hdu = fits.PrimaryHDU(mask_weird)
#hdulist = fits.HDUList([hdu])
#hdu.writeto("junk_mask.fits", clobber=True)
## END TEST
###########################################
# PCA-decompose the host star PSF
# (note no de-rotation of the image here)
# do the PCA fit of masked host star
# returns dict: 'pca_vector': the PCA best-fit vector; and 'recon_2d': the 2D reconstructed PSF
# N.b. PCA reconstruction will be to get an UN-sat PSF; note PCA basis cube involves unsat PSFs
fit_unsat = fit_pca_star(self.pca_basis_cube_sat, sci, no_mask, n_PCA=100)
# subtract the PCA-reconstructed host star
image_host_removed = np.subtract(sci,fit_unsat["recon_2d"])
# pickle the PCA vector
pickle_stuff = {"pca_cube_file_name": self.abs_PCA_name,
"pca_vector": fit_unsat["pca_vector"],
"recons_2d_psf_unsat": fit_unsat["recon_2d"],
"sci_image_name": abs_sci_name}
print(pickle_stuff)
pca_fit_pickle_write_name = str(self.config_data["data_dirs"]["DIR_PICKLE"]) \
+ "pickle_pca_sat_psf_info_" + str(os.path.basename(abs_sci_name).split(".")[0]) + ".pkl"
print(pca_fit_pickle_write_name)
with open(pca_fit_pickle_write_name, "wb") as f:
pickle.dump(pickle_stuff, f)
# add info to the header indicating last reduction step, and PCA info
header_sci["RED_STEP"] = "host_removed"
# write FITS file out, with fake planet params in file name
## ## do I actually want to write out a separate FITS file for each fake planet?
abs_image_host_removed_name = str(self.outdir + os.path.basename(abs_sci_name))
fits.writeto(filename = abs_image_host_removed_name,
data = image_host_removed,
header = header_sci,
overwrite = True)
print("Writing out host_removed frame " + os.path.basename(abs_sci_name)) | python | 17 | 0.573561 | 113 | 41.387755 | 98 |
PCA-decompose a saturated host star PSF and remove it
| class |
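Note on the HostRemoval row above: the core step (reconstruct the stellar PSF from a PCA basis, then subtract it) can be sketched with numpy alone. The least-squares projection below is a stand-in for fit_pca_star, whose actual implementation is not shown in this row, and the arrays are random.

```python
# Sketch only: project a frame onto a PCA basis and subtract the
# reconstruction. Basis and science frame are random stand-ins.
import numpy as np

rng = np.random.default_rng(0)
n_pca, ny, nx = 10, 32, 32
pca_basis_cube = rng.normal(size=(n_pca, ny, nx))   # stand-in basis cube
sci = rng.normal(size=(ny, nx))                     # stand-in science frame

basis = pca_basis_cube.reshape(n_pca, -1)           # (n_pca, ny*nx)
coeffs, *_ = np.linalg.lstsq(basis.T, sci.ravel(), rcond=None)
recon_2d = (coeffs @ basis).reshape(ny, nx)         # reconstructed host PSF
image_host_removed = sci - recon_2d                 # residual frame

print(coeffs.shape, float(np.abs(image_host_removed).mean()))
```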
class MeshLoader:
"""
Class to load the meshes for the objects in a scene.
"""
def __init__(self):
"""Module initializer"""
self.base_dir = CONSTANTS.MESH_BASE_DIR
self.text_dir = CONSTANTS.TEXT_BASE_DIR
self.reset()
def reset(self):
self.loaded_meshes = []
def get_meshes(self):
""" """
extract_singular = lambda x: x[0] if len(x) == 1 else x
return [extract_singular(item) for item in self.loaded_meshes]
def load_meshes(self, obj_info: List[object_info.ObjectInfo], **kwargs):
"""
Loads the meshes whose information is given in parameter 'obj_info.
Each call of this method APPENDS a list to the loaded_meshes attribute.
:param obj_info: The object information of the meshes to be loaded.
:param kwargs: additional mesh modifiers such as scale, specified with a leading 'mod_'
"""
paths = []
for obj in obj_info:
path = self.text_dir if obj.name.endswith("_floor") or obj.name.endswith("_wall") else self.base_dir
paths.append((path / obj.mesh_fp).resolve())
scales = [obj.scale for obj in obj_info]
class_ids = [obj.class_id for obj in obj_info]
mod_scales = kwargs.get("mod_scale", [1.0] * len(scales))
scales = [s * ms for (s, ms) in zip(scales, mod_scales)]
flags = [mesh_flags(obj) for obj in obj_info]
meshes = sl.Mesh.load_threaded(filenames=paths, flags=flags)
# Setup class IDs
for _, (mesh, scale, class_id) in enumerate(zip(meshes, scales, class_ids)):
pt = torch.eye(4)
pt[:3, :3] *= scale
mesh.pretransform = pt
mesh.class_index = class_id
info_mesh_tuples = list(zip(obj_info, meshes))
self.loaded_meshes.append(info_mesh_tuples) | python | 15 | 0.595161 | 112 | 39.456522 | 46 |
Class to load the meshes for the objects in a scene.
| class |
class ObjectLoader:
"""
Class to load the objects in a scene
"""
def __init__(self):
"""Module initializer"""
self.reset()
def reset(self):
self.instance_idx = 0
self.loaded_objects = dict()
@property
def static_objects(self):
return [obj for obj in self.loaded_objects.values() if obj.static]
@property
def dynamic_objects(self):
return [obj for obj in self.loaded_objects.values() if not obj.static]
def create_object(self, object_info: object_info.ObjectInfo, mesh: sl.Mesh, is_static: bool, **obj_mod):
"""
Proper object setup
:param mesh:
:param object_info:
:param is_static:
:param obj_mod: Optional object modifiers, specified with a leading 'mod_'.
IMPORTANT: scaling is done during mesh loading!!!
:return:
"""
ins_idx = self.instance_idx + 1
self.instance_idx += 1
obj = sl.Object(mesh)
mod_weight = obj_mod.get("mod_weight", obj_mod.get("mod_scale", 1.0) ** 3)
obj.mass = object_info.weight * mod_weight
obj.metallic = object_info.metallic
obj.roughness = object_info.roughness
obj.restitution = object_info.restitution
obj.static_friction = object_info.static_friction
obj.dynamic_friction = object_info.dynamic_friction
pose = obj_mod.get("mod_pose", torch.eye(4))
mod_R = obj_mod.get("mod_R", torch.eye(3))
pose[:3, :3] = torch.mm(mod_R, pose[:3, :3])
mod_t = obj_mod.get("mod_t", torch.tensor([obj_mod.get("mod_x", 0.0),
obj_mod.get("mod_y", 0.0),
obj_mod.get("mod_z", 0.0)]))
pose[:3, 3] += mod_t
obj.set_pose(pose)
obj.linear_velocity = obj_mod.get("mod_v_linear", torch.tensor([0.0, 0.0, 0.0]))
obj.angular_velocity = obj_mod.get("mod_v_angular", torch.tensor([0.0, 0.0, 0.0]))
obj.static = is_static
obj.instance_index = ins_idx
self.loaded_objects[ins_idx] = obj
return obj
def remove_object(self, instance_id, decrement_ins_idx=True):
obj = self.loaded_objects.pop(instance_id, None)
if decrement_ins_idx and obj is not None:
self.instance_idx -= 1
return obj | python | 15 | 0.564557 | 108 | 37.241935 | 62 |
Class to load the objects in a scene
| class |
class DecoratorLoader:
"""
Class to add random decorative objects to the scene, which do not participate of the scene dynamics.
It is based on creating an occupancy matrix of the scene, finding empty locations and placing stuff there
"""
def __init__(self, scene):
""" Object initializer """
self.config = SCENARIO_DEFAULTS["decorator"]
decorations = self.config["decorations"]
bounds = self.config["bounds"]
self.bounds = bounds
self.pi = torch.acos(torch.zeros(1))
self.scene = scene
self.mesh_loader = MeshLoader()
self.mesh_loader.load_meshes(decorations),
self.meshes = self.mesh_loader.get_meshes()[0]
self.x_vect = torch.arange(bounds["min_x"], bounds["max_x"] + bounds["res"], bounds["res"])
self.y_vect = torch.arange(bounds["min_y"], bounds["max_y"] + bounds["res"], bounds["res"])
return
def add_object(self, object_loader, object_id):
""" Loading an object and adding to the loader """
obj_info, obj_mesh = self.meshes[object_id]
pose = torch.eye(4)
obj_mod = {"mod_pose": pose}
obj = object_loader.create_object(obj_info, obj_mesh, True, **obj_mod)
self.scene.add_object(obj)
# shifting object to a free position and adjusting z-coord to be aligned with the table
position = self.occ_matrix.find_free_spot(obj=obj)
pose[:2, -1] = position if position is not None else torch.ones(2)
pose[2, -1] += obj.mesh.bbox.max[-1]
# Rotating object in yaw direction
yaw_angle = random.choice([torch.tensor([i * CONSTANTS.PI / 2]) for i in range(4)])
angles = torch.cat([yaw_angle, torch.zeros(2)])
rot_matrix = utils.get_rot_matrix(angles=angles)
pose[:3, :3] = pose[:3, :3] @ rot_matrix
obj.set_pose(pose)
self.occ_matrix.update_occupancy_matrix(obj)
self.occ_matrix.add_object_margings()
return
def decorate_scene(self, object_loader):
""" Randomly adding some decoderation to a scene """
# initializing occupancy matrix
self.occ_matrix = OccupancyMatrix(bounds=self.bounds, objects=self.scene.objects)
# iteratively placing objects while avoiding collision
N = torch.randint(low=self.config["min_objs"], high=self.config["max_objs"], size=(1,))
for i in range(N):
id = torch.randint(low=0, high=len(self.meshes), size=(1,))
self.add_object(object_loader, object_id=id)
return | python | 16 | 0.622553 | 109 | 41.583333 | 60 |
Class to add random decorative objects to the scene, which do not participate of the scene dynamics.
It is based on creating an occupancy matrix of the scene, finding empty locations and placing stuff there
| class |
class Source:
'''
Source class to define source objects
'''
def __init__(self,id,name,category):
self.id = id
self.name = name
self.category = category | python | 8 | 0.5625 | 41 | 20.444444 | 9 |
Source class to define source objects
| class |
class Article:
'''
Article class to define article objects
'''
def __init__(self, name, author, title, description, link, image, publishDate):
self.name = name
self.author = author
self.title = title
self.description = description
self.link = link
self.image = image
self.publishDate = publishDate | python | 8 | 0.597855 | 84 | 25.714286 | 14 |
Article class to define article objects
| class |
class Top:
'''
Top headlines class to define headlines objects
'''
def __init__(self, source, author, title, description, link, image):
self.source = source
self.author = author
self.title = title
self.description = description
self.link = link
self.image = image | python | 8 | 0.590909 | 73 | 24.461538 | 13 |
Top headlines class to define headlines objects
| class |
class ArgoWorflow:
"""The ArgoWorflow provide a way to start an argo WF based on an existing template.
"""
def __init__(self):
"""Initialize the ArgoWorflow
"""
logger.info("Reading configuration files")
logger.info(f"Argo config file > {ARGO_CONFIG}")
try:
with open(ARGO_CONFIG, 'r') as configfile:
argoconfig = yaml.load(configfile, Loader=yaml.SafeLoader)
# read mandatory parameters
self.server = argoconfig['argoserver']['server']
self.ns = argoconfig['argoserver']['namespace']
self.sa = argoconfig['argoserver']['serviceaccount']
self.template = argoconfig['argoserver']['template']
except OSError as err:
raise Exception(f'Could not read argo configuration: {err}')
except KeyError as err:
raise Exception(f'Missing mandatory configuration key: {err}')
except Exception as err:
raise Exception(f'Unknown error when reading settings: {err}')
# read non-mandatory parameters
self.proto = argoconfig['argoserver'].get('protocol', 'http')
self.param_name = argoconfig['argoserver'].get('event_param_name', 'event')
self.base64_encode = argoconfig['argoserver'].get('base64_encode', False)
self.raw_labels = argoconfig['argoserver'].get('labels', [])
# set a from:veba label
self.labels = ["from=veba"]
# add configured labels
for label in self.raw_labels:
self.labels.append(f"{label}={self.raw_labels[label]}")
def submit(self, event: dict):
"""Submit the workflow
Args:
event (dict): event data
"""
logger.debug("Preparing request data")
uri = f"{self.proto}://{self.server}/api/v1/workflows/{self.ns}/submit"
self.labels.append(f"event_id={event.get('id')}")
self.labels.append(f"event_subject={event.get('subject')}")
# base64 convertion
if self.base64_encode:
event_data = base64.b64encode(
json.dumps(event).encode('utf-8')
).decode()
else:
event_data = json.dumps(event)
# prepare the workflow data
data = {
"resourceKind": "WorkflowTemplate",
"resourceName": self.template,
"submitOptions": {
"serviceaccount": self.sa,
"parameters": [
f"{self.param_name}={event_data}"
],
"labels": ','.join(self.labels)
}
}
logger.debug(json.dumps(data, indent=4, sort_keys=True))
headers = { "Content-Type": "application/json" }
logger.info("Submiting workflow")
try:
r = requests.post(uri, json=data, headers=headers)
logger.debug(r.text)
r.raise_for_status()
except requests.exceptions.HTTPError:
return f"Invalid status code returned: {r.status_code}"
except Exception as err:
return f"Unable to make request to argo server {self.server}: {err}", 500
return "Argo workflow was successfully submited", 200 | python | 17 | 0.56775 | 87 | 41.934211 | 76 | The ArgoWorflow provide a way to start an argo WF based on an existing template.
| class |
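Note on the ArgoWorflow row above: the submit payload it builds looks like the sketch below. Server address, namespace, template, service account and event contents are placeholders; the actual POST (commented out) would need a reachable Argo server.

```python
# Sketch only: the payload shape ArgoWorflow.submit sends to
# /api/v1/workflows/{namespace}/submit. All names here are placeholders.
import base64
import json

event = {"id": "abc-123", "subject": "VmPoweredOffEvent"}
event_data = base64.b64encode(json.dumps(event).encode("utf-8")).decode()

payload = {
    "resourceKind": "WorkflowTemplate",
    "resourceName": "my-template",        # hypothetical template name
    "submitOptions": {
        "serviceaccount": "argo-sa",      # hypothetical service account
        "parameters": [f"event={event_data}"],
        "labels": "from=veba,event_id=abc-123,event_subject=VmPoweredOffEvent",
    },
}
uri = "http://argo.example.com/api/v1/workflows/default/submit"  # placeholder
print(json.dumps(payload, indent=2))
# With a reachable server:
# requests.post(uri, json=payload, headers={"Content-Type": "application/json"})
```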
class TimeseriesPlot:
"""
Object describes a 1D timeseries.
Attributes:
x (np.ndarray) - independent variable
y (np.ndarray) - dependent variable
ax (matplotlib.axes.AxesSubplot)
"""
def __init__(self, x, y, ax=None):
"""
Instantiate a 1D timeseries.
Args:
x (np.ndarray) - independent variable
y (np.ndarray) - dependent variable
ax (matplotlib.axes.AxesSubplot)
"""
self.x = x
self.y = y
# set axis
if ax is None:
ax = self.create_figure()
self.ax = ax
def create_figure(self):
""" Instantiate figure. """
fig, ax = plt.subplots(ncols=1, figsize=(3, 2))
ax.set_xlim(self.x.min(), self.x.max())
ax.set_ylim(0, 1.1*self.y.max())
ax.set_xlabel('Time (h)'),
ax.set_ylabel('Expression (a.u.)')
return ax
def scatter(self,
color='k',
alpha=1,
s=1,
rasterized=False,
**additional):
"""
Scatterplot markers for x and y data.
Args:
color (str) - marker color
alpha (float) - marker alpha
s (float) - marker size
rasterized (bool) - if True, rasterize markers
"""
marker_kw = dict(color=color, s=s, alpha=alpha, lw=0, rasterized=rasterized)
_ = self.ax.scatter(self.x, self.y, **marker_kw, **additional)
def average(self,
ma_type='savgol',
window_size=100,
resolution=1,
smooth=True,
color='k',
alpha=1,
lw=1,
linestyle=None,
**additional
):
"""
Plot moving average of x and y data.
Args:
ma_type (str) - type of average, 'savgol', 'sliding', or 'binned'
window_size (int) - size of sliding window or bin (num of cells)
resolution (int) - sampling resolution for confidence interval
smooth (bool) - if True, apply secondary savgol filter
color, alpha, lw, linestyle - formatting parameters
"""
ma_kw = dict(ma_type=ma_type, window_size=window_size, resolution=resolution, smooth=smooth)
line_kw = dict(line_color=color, line_alpha=alpha, line_width=lw, linestyle=linestyle)
if len(self.y) > window_size:
_ = plot_mean(self.x, self.y, ax=self.ax, **ma_kw, **line_kw, **additional)
def interval(self,
ma_type='sliding',
window_size=100,
resolution=25,
nbootstraps=1000,
confidence=95,
color='k',
alpha=0.5,
**additional):
"""
Plot confidence interval for moving average of x and y data.
Args:
ma_type (str) - type of moving average, 'sliding' or 'binned'
window_size (int) - size of sliding window or bin (num of cells)
resolution (int) - sampling resolution for confidence interval
nbootstraps (int) - number of bootstraps
confidence (float) - confidence interval, between 0 and 100
color, alpha - formatting parameters
"""
# define moving average keyword arguments
ma_kw = dict(ma_type=ma_type,
window_size=window_size,
resolution=resolution,
nbootstraps=nbootstraps,
confidence=confidence)
# define interval shading keyword arguments
shade_kw = dict(color=color, alpha=alpha)
# plot confidence interval
if len(self.y) > window_size:
plot_mean_interval(self.x,
self.y,
ax=self.ax,
**ma_kw,
**shade_kw)
def plot(self,
scatter=False,
average=True,
interval=False,
marker_kw={},
line_kw={},
interval_kw={},
ma_kw={}):
"""
Plot timeseries data.
Args:
scatter (bool) - if True, add datapoints
average (bool) - if True, add moving average
interval (bool) - if True, add moving average interval
marker_kw (dict) - keyword arguments for marker formatting
line_kw (dict) - keyword arguments for line formatting
interval_kw (dict) - keyword arguments for interval formatting
ma_kw (dict) - keyword arguments for moving average
"""
# add scattered data
if scatter:
self.scatter(**marker_kw)
# add moving average
if average:
self.average(**ma_kw, **line_kw)
# add confidence interval for moving average
if interval:
self.interval(**ma_kw, **interval_kw) | python | 13 | 0.510139 | 100 | 25.760638 | 188 |
Object describes a 1D timeseries.
Attributes:
x (np.ndarray) - independent variable
y (np.ndarray) - dependent variable
ax (matplotlib.axes.AxesSubplot)
| class |
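Note on the TimeseriesPlot row above: plot_mean and plot_mean_interval are imported from elsewhere and are not shown in this row, so the sketch below substitutes a simple sliding-window mean to illustrate the same scatter-plus-moving-average pattern on synthetic data.

```python
# Sketch only: synthetic data and a crude sliding-window mean stand in for the
# module's plot_mean helper; the axis labels mirror create_figure above.
import numpy as np
import matplotlib
matplotlib.use("Agg")              # headless backend so this runs anywhere
import matplotlib.pyplot as plt

x = np.linspace(0, 20, 500)                        # time (h), synthetic
y = np.sin(x / 3) + 0.2 * np.random.randn(x.size)  # expression (a.u.), synthetic

window = 50
y_avg = np.convolve(y, np.ones(window) / window, mode="same")

fig, ax = plt.subplots(ncols=1, figsize=(3, 2))
ax.scatter(x, y, color="k", s=1, alpha=0.5, lw=0)
ax.plot(x, y_avg, color="r", lw=1)
ax.set_xlabel("Time (h)")
ax.set_ylabel("Expression (a.u.)")
fig.savefig("timeseries_sketch.png", dpi=150)
```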
class Sparkfun_QwiicJoystick:
"""CircuitPython class for the Sparkfun QwiicJoystick
Usage:
# import the CircuitPython board and busio libraries
import board
import busio
# Create bus object using the board's I2C port
i2c = busio.I2C(board.SCL, board.SDA)
joystick = QwiicJoystick(i2c) # default address is 0x20
# use QwiicJoystick(i2c, address) for a different address
# joystick = QwiicJoystick(i2c, 0x21)"""
def __init__(self, i2c, address=QWIIC_JOYSTICK_ADDR, debug=False):
"""Initialize Qwiic Joystick for i2c communication."""
self._device = I2CDevice(i2c, address)
# save handle to i2c bus in case address is changed
self._i2c = i2c
self._debug = debug
# public properites
@property
def connected(self):
"""True if the Joystick is connected and a valid id is successful read."""
try:
# Attempt to read the id and see if we get an error
self._read_register(_JOYSTICK_ID)
except ValueError:
return False
return True
@property
def version(self):
"""Firmware version string for joystick."""
major = self._read_register(_JOYSTICK_VERSION1)
minor = self._read_register(_JOYSTICK_VERSION2)
return "v" + str(major) + "." + str(minor)
@property
def horizontal(self):
"""X value from 0 - 1023 of the joystick postion."""
# Read MSB for horizontal joystick position
x_msb = self._read_register(_JOYSTICK_X_MSB)
# Read LSB for horizontal joystick position
x_lsb = self._read_register(_JOYSTICK_X_LSB)
# mask off bytes and combine into 10-bit integer
x = ((x_msb & 0xFF) << 8 | (x_lsb & 0xFF)) >> 6
return x
@property
def vertical(self):
"""Y value from 0 to 1023 of the joystick postion."""
# Read MSB for veritical joystick position
y_msb = self._read_register(_JOYSTICK_Y_MSB)
# Read LSB for vertical joystick position
y_lsb = self._read_register(_JOYSTICK_Y_LSB)
# mask off bytes and combine into 10-bit integer
y = ((y_msb & 0xFF) << 8 | (y_lsb & 0xFF)) >> 6
return y
@property
def button(self):
"""0 if button is down, 1 if button is up."""
button = self._read_register(_JOYSTICK_BUTTON)
return button
# Issue: register 0x08 always contains 1 for some reason, even when cleared
@property
def button_status(self):
"""1 if button pressed between reads, cleared after read."""
# read button status (since last check)
status = self._read_register(_JOYSTICK_STATUS)
# clear button status
self._write_register(_JOYSTICK_STATUS, 0x00)
return status & 0xFF
# public functions
def set_i2c_address(self, new_address):
"""Change the i2c address of Joystick snd return True if successful."""
# check range of new address
if new_address < 8 or new_address > 119:
print("ERROR: Address outside 8-119 range")
return False
# write magic number 0x13 to lock register, to unlock address for update
self._write_register(_JOYSTICK_I2C_LOCK, 0x13)
# write new address
self._write_register(_JOYSTICK_CHANGE_ADDRESS, new_address)
# wait a second for joystick to settle after change
sleep(1)
# try to re-create new i2c device at new address
try:
self._device = I2CDevice(self._i2c, new_address)
except ValueError as err:
print("Address Change Failure")
print(err)
return False
# if we made it here, everything went fine
return True
# No i2c begin function is needed since I2Cdevice class takes care of that
# private functions
def _read_register(self, addr):
# Read and return a byte from the specified 8-bit register address.
with self._device as device:
device.write(bytes([addr & 0xFF]))
result = bytearray(1)
device.readinto(result)
# For some reason, write_then_readinto returns invalid data
# device.write_then_readinto(bytes([addr & 0xFF]), result)
if self._debug:
print("$%02X => %s" % (addr, [hex(i) for i in result]))
return result[0]
def _write_register(self, addr, value):
# Write a byte to the specified 8-bit register address
with self._device as device:
device.write(bytes([addr & 0xFF, value & 0xFF]))
if self._debug:
print("$%02X <= 0x%02X" % (addr, value)) | python | 17 | 0.607455 | 82 | 34.308271 | 133 | CircuitPython class for the Sparkfun QwiicJoystick
Usage:
# import the CircuitPython board and busio libraries
import board
import busio
# Create bus object using the board's I2C port
i2c = busio.I2C(board.SCL, board.SDA)
joystick = QwiicJoystick(i2c) # default address is 0x20
# use QwiicJoystick(i2c, address) for a different address
# joystick = QwiicJoystick(i2c, 0x21) | class |
class Item:
"""
Class representing store hub files.
"""
def __init__(self, id: str, base_url: str):
self.id = id
self.base_url = base_url
@cached_property
def public_url(self):
""" Get public url from item in workspace.
"""
url = f'{self.base_url}/workspace/items/{self.id}/publiclink?gcube-token={self.token}'
x = requests.get(url)
# for some reason, the response returns an url with surrounding quote marks
return x.text[1:-1]
@property
def token(self):
return context.token | python | 11 | 0.587329 | 94 | 25.590909 | 22 |
Class representing store hub files.
| class |
class VMFCache:
""" An expandable-size cache for VMFs. This lets us skip the load process
for VMFs that we've already loaded before, which is helpful for VMFs that
take a long time to parse.
"""
def __init__(self):
self.maxSize = 1
self.data = {}
self.unusedPaths = set()
self.pendingUnusedPaths = set()
self._mutex = RLock()
def increase_max_size(self, maxSize):
''' Increases the max size of the cache to the given number.
If the requested max size is less than the current size, this does
nothing.
'''
with self._mutex:
if maxSize > self.maxSize:
self.set_max_size(maxSize)
def set_max_size(self, maxSize):
with self._mutex:
if maxSize < self.get_vmf_count():
raise ValueError("Can't clear enough unused entries!")
self.evict_unused()
self.maxSize = maxSize
assert len(self.data) <= self.maxSize
def add_vmf(self, vmf):
vmfPath = vmf.path
with self._mutex:
assert len(self.data) <= self.maxSize
if vmfPath in self.pendingUnusedPaths:
# This VMF has been preemptively marked as unused.
# Don't bother caching it.
self.pendingUnusedPaths.remove(vmfPath)
return
if len(self.data) >= self.maxSize:
if len(self.unusedPaths) > 0:
self.evict_unused(limit=1)
else:
raise ValueError("VMF cache limit reached!")
self.data[vmfPath] = vmf
assert len(self.data) <= self.maxSize
def mark_used(self, *vmfPaths):
with self._mutex:
for vmfPath in vmfPaths:
if vmfPath in self.unusedPaths:
self.unusedPaths.remove(vmfPath)
def mark_unused(self, *vmfPaths):
with self._mutex:
for vmfPath in vmfPaths:
if vmfPath in self.data:
self.unusedPaths.add(vmfPath)
else:
self.pendingUnusedPaths.add(vmfPath)
def evict_unused(self, limit=float('inf')):
with self._mutex:
for i, unusedPath in enumerate(set(self.unusedPaths)):
if i >= limit:
break
del self.data[unusedPath]
self.unusedPaths.remove(unusedPath)
print("Evicted", unusedPath)
assert len(self.data) <= self.maxSize
def has_vmf_path(self, path):
with self._mutex:
return path in self.data
def get_vmfs(self):
with self._mutex:
return [
vmf for vmf in self.data.values()
if vmf.path not in self.unusedPaths
]
def get_vmf_count(self):
with self._mutex:
return len(self.data) - len(self.unusedPaths) | python | 16 | 0.489004 | 77 | 32.080808 | 99 | An expandable-size cache for VMFs. This lets us skip the load process
for VMFs that we've already loaded before, which is helpful for VMFs that
take a long time to parse.
| class |
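Note on the VMFCache row above: a usage sketch under the assumption that the class can be imported; FakeVMF is a hypothetical stand-in exposing only the .path attribute the cache reads.

```python
# Sketch only: assumes VMFCache is importable from its module; FakeVMF is a
# made-up stand-in with just the .path attribute.
from collections import namedtuple

FakeVMF = namedtuple("FakeVMF", ["path"])

cache = VMFCache()                 # assumed to be in scope
cache.set_max_size(2)
cache.add_vmf(FakeVMF("a.vmf"))
cache.add_vmf(FakeVMF("b.vmf"))

cache.mark_unused("a.vmf")         # a.vmf becomes eligible for eviction
cache.add_vmf(FakeVMF("c.vmf"))    # evicts a.vmf instead of raising ValueError

print(sorted(vmf.path for vmf in cache.get_vmfs()))   # ['b.vmf', 'c.vmf']
```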
class StateHandler:
"""utilities commonly used when working with states"""
def getDateString(date):
"""returns iso-date-string of specified date"""
return str(f"{date.year}-{date.month}-{date.day}")
def getAppropriateState(title):
"""returns appropriate state depending of due_date and deadline"""
if ItemHandler.getProperty(title, "due_date") == StateHandler.getDateString(datetime.now()):
return "active"
elif ItemHandler.getProperty(title, "due_date") == None:
return "upcoming"
elif ItemHandler.getProperty(title, "deadline") == None:
return "upcoming"
elif ItemHandler.getProperty(title, "deadline") == StateHandler.getDateString(datetime.now()):
return "urgent" | python | 12 | 0.651786 | 102 | 48.0625 | 16 | utilities commonly used when working with states | class |
class DBConfig:
"""
Holds the DB parameters for the web scraping.
"""
HOST = "localhost"
USER = "root"
PASSWORD = "password" # not real password, change after pulling this file
DATABASE = "brbeky1hybvf32t4ufxz"
INSERT_CITY_QUERY = "INSERT IGNORE INTO cities(city_name) values (%s)"
INSERT_LISTINGS_QUERY = "INSERT IGNORE INTO listings(listing_type) values (%s)"
INSERT_PROPERTY_TYPES_QUERY = "INSERT IGNORE INTO property_types(property_type) values (%s)"
FK_IDS_LIST = ['listing_id', 'property_type_id', 'city_id']
PRICE_COLUMN_IDX = 3
LATITUDE_COLUMN_IDX = -5
GET_LISTING_TYPE_ID_QUERY = "SELECT id FROM listings WHERE listing_type = %s"
GET_PROPERTY_TYPE_ID_QUERY = "SELECT id FROM property_types WHERE property_type = %s"
GET_CITY_ID_QUERY = "SELECT id FROM cities WHERE city_name = %s"
TUPLE_FIRST_ELEMENT_IDX = 0
LISTING_TYPE_IDX = 0
PROPERTY_TYPE_IDX = 1
CITY_IDX = 2
SEPARATOR = ","
TABLE_FEEDER_COLUMN_IDX = 3 | python | 7 | 0.663699 | 96 | 33.896552 | 29 |
Holds the DB parameters for the web scraping.
| class |
class Configuration:
"""
Holds the user parameters for the web scraping.
"""
# class attr
args = None
# PARAMETERS KWARGS KEYS
VERBOSE_KEY = 'verbose'
LIMIT_KEY = 'limit'
PRINT_KEY = 'to_print'
SAVE_KEY = 'save'
DB_KEY = 'to_database'
FETCH_KEY = 'fetch_info'
LISTING_TYPE_KEY = 'listing_type'
# CONSTANTS FOR SCRAPING
PRINTABLE = set(string.printable)
SILENCE_DRIVER_LOG = '0'
BROWSER_WIDTH = 1919
BROWSER_HEIGHT = 1079
PROPERTY_LISTING_TYPE = ('buy', 'rent', 'commercial', 'new_homes', 'all')
LISTING_MAP = {
'buy': ['buy'],
'rent': ['rent'],
'commercial': ['commercial'],
'new_homes': ['new homes'],
'all': ['buy', 'rent', 'commercial', 'new homes']
}
MAIN_URL = 'https://www.onmap.co.il/en'
URLS = {'buy': MAIN_URL + '/homes/buy',
'rent': MAIN_URL + '/homes/rent',
'commercial': MAIN_URL + '/commercial/rent',
'new homes': MAIN_URL + '/projects'}
COLUMNS_NOT_SELENIUM = ['Date', 'City_name', 'Street_name', 'House_number', 'Bathrooms', 'Rooms', 'Floor',
'Area[m^2]',
'Parking_spots_aboveground', 'Parking_spots_underground', 'Price[NIS]', 'Property_type']
SCROLL_PAUSE_TIME = 1
BETWEEN_URL_PAUSE = 3
SINGLE_ATR_ITEM = 1
TRIVIAL_NUMBER = 0
INVALID_FLOOR_TEXT_SIZE = 1
NOT_SELENIUM_PRINTING_HASH_CONSTANT = 20
NONE = 'none'
DICT_PROPERTY_ID = {'id': 'propertiesList'}
# INDICES FOR PARSING
NOT_SELENIUM_PARSING_FILE_IDX = 0
ELEM_TO_SCROLL_IDX = -1
PRICE_IDX = -1
CITY_IDX = -1
ADDRESS_IDX = -2
PROPERTY_TYPE_IDX = 1
NUM_OF_ROOMS_IDX = 0
FLOOR_IDX = 1
SIZE_IDX = 2
PARKING_SPACES_IDX = 3
FILENAME_IDX = -1
SIZE_TEXT_IDX = 0
NOT_SELENIUM_REGION_IDX = -1
URL_SPLIT_SEPARATOR = '/'
NOT_SELENIUM_SEPARATOR = '.'
SEPARATOR = ", "
PROPERTIES_LIST_IDX = 1
LEN_PROPER = 2
EMPTY = ""
DUMMY_REPLACER = 0
# XPATHS AND SELENIUM COMMANDS
SCROLL_COMMAND = "arguments[0].scrollIntoView();"
PROPERTIES_XPATH = "//div[@style='position: relative;']"
BOTTOM_PAGE_XPATH = "//div[@class='G3BoaHW05R4rguvqgn-Oo']"
# Handling strings
ENCODING = "ISO-8859-8"
COMMERCIAL_FILENAME = "commercial.csv"
NEW_HOMES_FILENAME = "new_homes.csv"
PROJECT = 'project'
COMMERCIAL = 'commercial'
# DF columns names
PRICE_COL = 'Price'
ROOM_COL = 'Rooms'
FLOOR_COL = 'Floor'
AREA_COL = 'Area'
CITY_COL = 'City'
PARKING_COL = 'Parking_spots'
PROP_TYPE_COL = 'Property_type'
LIST_TYPE_COL = 'listing_type'
@classmethod
def define_parser(cls):
"""
Creates the command line arguments
"""
arg_parser = argparse.ArgumentParser(
description="Scraping OnMap website | Checkout https://www.onmap.co.il/en/")
arg_parser.add_argument(
"property_listing_type",
choices=Configuration.PROPERTY_LISTING_TYPE,
help="choose which type of properties you would like to scrape",
type=str)
arg_parser.add_argument('--limit', '-l',
help="limit to n number of scrolls per page", metavar="n",
type=int,
required=False)
arg_parser.add_argument("--print", '-p', help="print the results to the screen", action="store_true")
arg_parser.add_argument("--save", '-s',
help="save the scraped information into a csv file in the same directory",
action="store_true")
arg_parser.add_argument("--database", '-d',
help="inserts new information found into the on_map database",
action="store_true")
arg_parser.add_argument("--fetch", '-f',
help="fetches more information for each property using Nominatim API",
action="store_true")
arg_parser.add_argument("--verbose", '-v', help="prints messages during the scraper execution",
action="store_true")
cls.args = arg_parser.parse_args() | python | 12 | 0.55535 | 116 | 34.991667 | 120 |
Holds the user parameters for the web scraping.
| class |
class Logger:
"""
This class handles logging for the entire web scraping process
"""
logger = None
scroll_finished = FINISHED_SCROLLING
scroll_finished_new_home = HOMES_FINISHED_SCROLLING
end_scroll_function = SCROLL_FINISHED
end_scroll_new_home = NEW_HOMES_FINISHED
fetch_more_init = MORE_ATTRIBUTES_STARTING
geofetcher_init = GEOFETCHER_INITIALIZED
end_fetch_more_att = FINISHED_SUCCESSFULLY_FETCH
main_cli = PARSER_WAS_SUCCESSFUL
main_no_url = NO_URLS_FOUND_TO_SCRAPE
main_scrape_obj = SCRAPER_OBJECT
main_closing_driver = CLOSING_DRIVER
main_quit_drive = QUITTING_DRIVER
error_connect_server = ERROR_CONNECTION
connection_successful = DB_SUCCESSFUL
commit_successful = COMMIT_TO_DB_SUCCESSFUL
@classmethod
def start_logging(cls):
cls.logger = logging.getLogger('on_map_scraper')
cls.logger.setLevel(logging.DEBUG)
formatter = logging.Formatter("'%(asctime)s - %(threadName)s - %(name)s - %(levelname)s - %(message)s'")
# create a file handler and add it to logger
file_handler = logging.FileHandler('web_scraper.log', mode='a')
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
cls.logger.addHandler(file_handler)
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setLevel(logging.CRITICAL)
stream_handler.setFormatter(formatter)
cls.logger.addHandler(stream_handler)
@staticmethod
def scroll_error(ele_to_scroll):
"""
Message for the logger in case of error when scrolling.
----
:param ele_to_scroll: html element to look for when scrolling
:type ele_to_scroll: str
:return: error message
:rtype: str
"""
return f"_scroll: ele_to_scroll should have a content but it is {ele_to_scroll}"
@staticmethod
def scroll_new_homes(prev_len):
"""
        Message for the logger when scrolling.
----
:param prev_len: number of elements found when scrolling
:type prev_len: int
:return: message
:rtype: str
"""
return f"_scroll_new_homes:prev_len {prev_len}"
@staticmethod
def end_save_csv(url):
"""
        Message for the logger when finished saving a url's content to a csv
----
:param url: url address
        :type url: str
:return: message
:rtype: str
"""
return f"_save_to_csv: finished {url}"
@staticmethod
def init_print_save_df(url, to_print, save, to_database, verbose, listing_type):
"""
Message for the logger at beginning of print_save_df function
----
:param url: url address
:type url: str
:param listing_type: type of listing: buy, rent, commercial, new_home
:type listing_type: str
:param to_print: if true, it prints the dataframe to the screen
:type to_print: bool
:param save: if true, it saves the dataframe into a csv file
:type save: bool
:param to_database: if true, it saves the new information from the dataframe to the database
:type to_database: bool
:param verbose: if true, it prints relevant information to the user
:type verbose: bool
"""
return f"_print_save_df: Checking if print {url}, to_print={to_print}, save={save}, to_database={to_database}, " \
f"verbose={verbose}, listing_type={listing_type}"
@staticmethod
def saving_print_save_df(url, to_print, save, to_database, verbose, listing_type):
"""
Message for the logger before saving to a csv in print_save_df function
----
:param url: url address
:type url: str
:param listing_type: type of listing: buy, rent, commercial, new_home
:type listing_type: str
:param to_print: if true, it prints the dataframe to the screen
:type to_print: bool
:param save: if true, it saves the dataframe into a csv file
:type save: bool
:param to_database: if true, it saves the new information from the dataframe to the database
:type to_database: bool
:param verbose: if true, it prints relevant information to the user
:type verbose: bool
"""
return f"_print_save_df: Saving into csv {url}, to_print={to_print}, save={save}, to_database={to_database}, " \
f"verbose={verbose}, listing_type={listing_type}"
@staticmethod
def db_print_save_df(url, to_print, save, to_database, verbose, listing_type):
"""
Message for the logger before saving into the db in print_save_df function
----
:param url: url address
:type url: str
:param listing_type: type of listing: buy, rent, commercial, new_home
:type listing_type: str
:param to_print: if true, it prints the dataframe to the screen
:type to_print: bool
:param save: if true, it saves the dataframe into a csv file
:type save: bool
:param to_database: if true, it saves the new information from the dataframe to the database
:type to_database: bool
:param verbose: if true, it prints relevant information to the user
:type verbose: bool
"""
return f"_print_save_df: Saving into db {url}, to_print={to_print}, save={save}, to_database={to_database}, " \
f"verbose={verbose}, listing_type={listing_type}"
@staticmethod
def end_print_save(url):
"""
Message for the logger when finished running the function _print_save_df
----
:param url: url address
        :type url: str
:return: message
:rtype: str
"""
return f"_print_to_save: finished {url}"
@staticmethod
def pulling_row_info(row_number):
"""
Message for the logger when pulling row information in fetch_more_attributes function
----
:param row_number: row number in the dataframe
:type row_number: int
:return: message
:rtype: str
"""
return f"fetch_more_attributes: Pulling info for row {row_number}"
@staticmethod
def exception_fetch_more_attributes(row_number, exception):
"""
Message for the logger when an exception occurred when pulling row information in fetch_more_attributes function
----
:param row_number: row number in the dataframe
:type row_number: int
:param exception: error message
:type exception: exception
:return: message
:rtype: str
"""
return f"fetch_more_attributes: row {row_number}, {exception}"
@staticmethod
def not_fetched(fetch_info):
"""
Message for the logger when additional information was not fetched
----
        :param fetch_info: whether additional information should be fetched
:type fetch_info: bool
:return: message
:rtype: str
"""
return f"fetch_more_attributes: fetch info == {fetch_info}"
@staticmethod
def creating_df(url):
"""
Message for the logger when _create_df is called
----
:param url: url address
:type url: str
:return: message
:rtype: str
"""
return f"create_df: Creating dataframe from {url}"
@staticmethod
def created_df(url):
"""
Message for the logger when _create_df is finished
----
:param url: url address
:type url: str
:return: message
:rtype: str
"""
return f"create_df: Created dataframe from {url} successfully"
@staticmethod
def scraping(url):
"""
Message for the logger when scrap_url is called
----
:param url: url address
:type url: str
:return: message
:rtype: str
"""
return f"scrap_url: Scrolling {url}"
@staticmethod
def before_scroll(url):
"""
Message for the logger before _scroll is called
----
:param url: url address
:type url: str
:return: message
:rtype: str
"""
return f"scrap_url: Scrolling {url} - not new_homes"
@staticmethod
def before_scroll_new_home(url):
"""
Message for the logger before _scroll_new_homes is called
----
:param url: url address
:type url: str
:return: message
:rtype: str
"""
return f"scrap_url: Scrolling {url} - new_homes"
@staticmethod
def before_scraping(url):
"""
Message for the logger before starting to actually scrape in scrap_url
----
:param url: url address
:type url: str
:return: message
:rtype: str
"""
return f"scrap_url: Scraping {url}"
@staticmethod
def finished_scraping(url):
"""
Message for the logger at the end of scrap_url
----
:param url: url address
:type url: str
:return: message
:rtype: str
"""
return f"scrap_url: finished {url}"
@staticmethod
def main_scraping(url):
"""
Message for the logger before calling scrap_url
----
:param url: url address
:type url: str
:return: message
:rtype: str
"""
return f"main: Scraping {url}"
@staticmethod
def main_scraped_success(url):
"""
Message for the logger after all scraping operations are done for the particular url
----
:param url: url address
:type url: str
:return: message
:rtype: str
"""
return f"main: Scrapped {url} successfully"
@staticmethod
def connect_to_server(listing, verbose):
"""
Message for the logger before connecting to db server
----
:param listing: listing type of the dataframe
:type listing: str
:param verbose: whether nor not to print relevant info to the user
:type verbose: bool
:return: message
:rtype: str
"""
return f"_save_to_data_base: Connecting to the db listing_type={listing}, verbose={verbose}"
@staticmethod
def insert_city_error(city):
"""
        Message for the logger when an error occurs inserting an existing city into the cities table
----
:param city: city already in table
:type city: str
:return: message
:rtype: str
"""
return f"_save_to_data_base: {city} is already in cities."
@staticmethod
    def insert_listing_error(listing):
"""
        Message for the logger when an error occurs inserting an existing listing type into the listings table
----
:param listing: listing already in table
:type listing: str
:return: message
:rtype: str
"""
return f"_save_to_data_base: {listing} is already in listings."
@staticmethod
    def insert_property_error(property):
"""
        Message for the logger when an error occurs inserting an existing property into the properties table
----
:param property: property already in table
:type property: str
:return: message
:rtype: str
"""
return f"_save_to_data_base: {property} is already in properties."
@staticmethod
def insert_row_error(row):
"""
        Message for the logger when an error occurs inserting an existing row into the properties table
----
:param row: row already in table
:type row: pd.Series
:return: message
:rtype: str
"""
return f"_save_to_data_base: {row} is already in properties. " | python | 12 | 0.594302 | 122 | 32.137255 | 357 |
This class handles logging for the entire web scraping process
| class |
class ExperimentConfig:
"""
Configuration Parameters for experiments
"""
# number of fields of view in each well - pairs of images (DAPI and FITC) for each field
FIELDS_PER_WELL = 20
# smoothing constant
EPS = 0.00000001
    # Model paths:
# -------------------------------------------------------------------------
NUCLEI_MASK_RCNN_WEIGHTS_PATH = "deepretina_final.h5"
NEURITE_SEGMENTATION_MODEL_PATH = "neurite_unet_weights.h5"
    # Parameters for a boolean mask containing a round search area to search
# for cells in the proximity of neurite endpoints
# ----------------------------------------------------------------------------
# length in pixels of the search radius around each neurite endpoint to search for a cell
RADIUS = 15
# square boolean mask edge length
square_edge_length = (RADIUS + 1) * 2 + 1
y, x = np.ogrid[: square_edge_length, : square_edge_length]
# boolean mask with disk of ones at the center
DISK_MASK = (x - (RADIUS + 1)) ** 2 + (y - (RADIUS + 1)) ** 2 <= RADIUS ** 2
# Outlier Removal
# ----------------------------------------------------------------------------
# minimum number of fields to accept the results of a well as valid
MIN_VALID_FIELDS = 5
# Outlier removal thresholds:
# minimal number of cells allowed in a field for it to be valid
MIN_CELL_NUM = 50
# maximal number of cells allowed in a field for it to be valid
MAX_CELL_NUM = 1000
# max allowed ratio of un-viable cells in a field
MAX_APOP_RATIO = 0.25
# max allowed ratio of extremely clustered cells
MAX_HIGH_DENSITY_RATIO = 0.45
# Parameters for cell density:
# a cell in a highly dense area in the field is a cell with
    # at least MIN_SAMPLES in a range of D_EPS radius around it
D_EPS = 100
MIN_SAMPLES = 10
# unsupervised outlier removal constants:
# straight line will be calculated using Random Sample Consensus (RANSAC) algorithm
# number of samples randomly selected equal to RANSAC_MIN_SAMPLES.
RANSAC_MIN_SAMPLES = 5
assert RANSAC_MIN_SAMPLES <= MIN_VALID_FIELDS, "The minimal number of valid fields has to be equal or larger" \
" than the number of minimal ransac samples or else" \
" the algorithm might not work"
# fields with residual distance far away will have a low probability to fit the RANSAC line
    # fields with probability lower than threshold will be considered invalid.
PROBABILITY_THRESHOLD = 0.05
# Connection Probability
# ----------------------
# connection distances
SHORT_DISTANCE = 100
INTERMEDIATE_DISTANCE = 300
LONG_DISTANCE = 400
# connection probability over a distance (connection_pdf) constants:
# minimal and maximal distances for calculating the probability of connection
MIN_DISTANCE = 0
MAX_DISTANCE = 1000
# distance range of each pdf bin - meaning the probability of connection will be calculated in
# the following distance ranges to create the connection_pdf:
# (MIN_DISTANCE : BIN_SIZE),
# ((MIN_DISTANCE + BIN_SIZE) : (MIN_DISTANCE + 2*BIN_SIZE)),
# ...
# (MAX_DISTANCE - BIN_SIZE) : MAX_DISTANCE) range
BIN_SIZE = 25
def display(self):
"""Display Configuration values."""
print("\nConfigurations:")
for a in dir(self):
if a.isupper():
print("{:30} {}".format(a, getattr(self, a)))
print("\n") | python | 16 | 0.60173 | 115 | 35.948454 | 97 |
Configuration Parameters for experiments
| class |
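The DISK_MASK above is built with an np.ogrid squared-distance test; a tiny standalone sketch of the same technique follows (the radius value here is made up for brevity, everything else mirrors the snippet):

import numpy as np

# Same ogrid/disk technique as ExperimentConfig.DISK_MASK, shrunk to an illustrative radius.
RADIUS = 3
edge = (RADIUS + 1) * 2 + 1
y, x = np.ogrid[:edge, :edge]
disk_mask = (x - (RADIUS + 1)) ** 2 + (y - (RADIUS + 1)) ** 2 <= RADIUS ** 2
print(disk_mask.astype(int))  # the 1s form a filled circle used as a search area around a point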
class LaneGeneratorTSMixin: # (LaneGeneratorCU):
""" Generates a time series of lanes.
"""
def __iter__(self):
return self
def __next__(self):
""" Iterator over the frames of the movie.
"""
curr_idx_batch = 0
curr_time_step = 0
X_list = []
y_list = []
X_list_ts = []
y_list_ts = []
while curr_time_step < self.nb_time_steps:
curr_filename = next(self._file_iterator())
if curr_idx_batch < self.batch_size_ts:
X, y = self._generate_one_Xy(curr_filename)
X_list_ts.append(X)
y_list_ts.append(y)
curr_idx_batch += 1
else:
X_list.append(np.array(X_list_ts))
y_list.append(np.array(y_list_ts))
curr_idx_batch = 0
curr_time_step += 1
X_list_ts = []
y_list_ts = []
return np.array(X_list), np.array(y_list)
def show_movie_with_lanes(self, wait_between_frames : int = 100 ):
""" Shows the movie from images.
"""
for X, y in self:
# X, y of shape (batch_size, nb_time_steps, image_x, image_y, nb_channels)
for batch_X, batch_y in zip(X, y):
for X_time_step, y_time_step in zip(batch_X, batch_y):
cv2.imshow('TS Video', cv2.addWeighted(X_time_step, 0.6, y_time_step, 0.8, 0))
cv2.waitKey(wait_between_frames) | python | 16 | 0.489446 | 98 | 31.276596 | 47 | Generates a time series of lanes.
| class |
class Point:
'''
A Point in a bidimensional plane with coordinates (x, y) and an index to identify it.
'''
def __init__(self, index: int, x: int, y: int):
self.__index = index
self.__x = x
self.__y = y
@property
def index(self) -> int:
'''
Index that works as an identification.
'''
return self.__index
@property
def x(self) -> int:
'''
Coordinate X.
'''
return self.__x
@property
def y(self) -> int:
'''
Coordinate Y.
'''
return self.__y
def distance(self, point: 'Point') -> float:
'''
Calculates the Euclidean distance to another Point.
'''
dx = abs(self.x - point.x)
dy = abs(self.y - point.y)
return math.hypot(dx, dy)
def __str__(self) -> str:
'''
Returns the string representation of a Point:
<index> <x> <y>
'''
return str(self.index) + ' ' + str(self.x) + ' ' + str(self.y)
def __eq__(self, point: 'Point') -> bool:
return self.index == point.index and self.x == point.x and self.y == point.y
def __hash__(self):
return hash((self.index, self.x, self.y))
def __repr__(self) -> str:
# return f'Point(index={self.index}, x={self.x}, y={self.y})'
return str(self.index) | python | 13 | 0.493863 | 89 | 23.75 | 56 |
A Point in a bidimensional plane with coordinates (x, y) and an index to identify it.
| class |
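A minimal usage sketch for the Point class above; it assumes the class (and its math import) is in scope, and the coordinates are arbitrary:

p1 = Point(0, 0, 0)
p2 = Point(1, 3, 4)
print(p1.distance(p2))        # 5.0 -- Euclidean distance via math.hypot
print(p1 == Point(0, 0, 0))   # True: equality compares index and both coordinates
print(str(p2))                # "1 3 4" -- "<index> <x> <y>"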
class FinanceHMM:
"""
Class to compute multivariate mixture distributions from n_assets based on a given HMM.
Computes posteriors, state sequences as well as expected and forecasted returns and standard deviations.
Transforms lognormal multivariate distributions into normal distributions and combines them into mixtures.
Parameters
----------
X : ndarray of shape (n_samples,)
        Time series data used to train the HMM.
df : DataFrame of shape (n_samples, n_assets)
        Time series data used when estimating expected returns and covariances.
model : hidden markov model
Hidden Markov Model object.
Attributes
----------
preds : ndarray of shape (n_samples-window_len, n_preds, n_assets)
mean predictions for each asset h time steps into the future at each time t.
cov : ndarray of shape(n_samples-window_len, n_preds, n_assets, n_assets)
predicted covariance matrix h time steps into the future at each time t.
"""
def __init__(self, model):
self.model = model
self.n_states = model.n_states
self.n_assets = None
def get_cond_asset_dist(self, df, state_sequence):
"""
Compute conditional multivariate normal distribution of all assets in each state.
Assumes returns follow a multivariate log-normal distribution. Proceeds by first
getting the conditional log of means and covariances and then transforming them
        back into normal variables.
Parameters
----------
df : DataFrame of shape (n_samples, n_assets)
log-returns for assets
state_sequence : ndarray of shape (n_samples,)
Decoded state sequence
Returns
-------
mu : ndarray of shape (n_states, n_assets)
Conditional mean value of each assets
cov : ndarray of shape (n_states, n_assets, n_assets)
Conditional covariance matrix
"""
self.n_assets = df.shape[1]
df = df.iloc[-len(state_sequence):]
df['state_sequence'] = state_sequence
groupby_state = df.groupby('state_sequence')
log_mu, log_cov = groupby_state.mean(), groupby_state.cov()
state_count = groupby_state.count().max(axis=1) # Num obs in each state
mu = np.zeros(shape=(self.n_states, self.n_assets))
cov = np.zeros(shape=(self.n_states, self.n_assets, self.n_assets))
# Loop through n_states present in current sample
for s in log_mu.index:
if state_count[s] > 1: # If state_count not >1, covariance will return NaN
mu[s], cov[s] = self.logcov_to_cov(log_mu.loc[s], log_cov.loc[s])
return mu, cov
def get_uncond_asset_dist(self, posteriors, cond_mu, cond_cov):
"""
Compute unconditional multivariate normal distribution of all assets.
Parameters
----------
posteriors: ndarray of shape (n_preds, n_states)
predicted posterior probability of being in state i at time t+h.
cond_mu : ndarray of shape (n_states, n_assets)
Conditional mean value of each assets
cond_cov : ndarray of shape (n_states, n_assets, n_assets)
Conditional covariance matrix
Returns
-------
pred_mu : ndarray of shape (n_preds, n_assets)
Conditional mean value of each assets
pred_cov : ndarray of shape (n_preds, n_assets, n_assets)
Conditional covariance matrix
"""
pred_mu = np.inner(cond_mu.T, posteriors).T # shape (n_preds, n_assets)
cov_x1 = np.inner(posteriors, cond_cov.T) # shape (n_preds, n_assets, n_assets)
cov_x2 = pred_mu - cond_mu[:, np.newaxis] # shape (n_states, n_preds)
cov_x3 = np.einsum('ijk,ijk->ij', cov_x2, cov_x2) # Equal to np.sum(X**2, axis=-1)
cov_x4 = np.einsum('ij,ij->i', cov_x3.T, posteriors) # Equal to np.sum(X3*posteriors, axis=1)
pred_cov = cov_x1 + cov_x4[:, np.newaxis, np.newaxis] # shape (n_preds, n_assets, n_assets)
return pred_mu, pred_cov
@staticmethod
def logcov_to_cov(log_mu, log_cov):
"""
Transforms log returns' means and covariances back into regular formats.
Parameters
----------
log_mu : DataFrame of shape (n_assets,)
log_cov : DataFrame of shape (n_assets, n_assets)
Returns
-------
mu : ndarray of shape (n_assets)
Mean value of each assets
cov : ndarray of shape (n_assets, n_assets)
Covariance matrix
"""
diag = np.diag(log_cov)
mu = np.exp(log_mu + np.diag(log_cov) / 2) - 1
x1 = np.outer(mu, mu) # Multiply all combinations of the vector mu -> 2-D array
x2 = np.outer(diag, diag) / 2
cov = np.exp(x1 + x2) * (np.exp(log_cov) - 1)
return mu, cov
def stein_shrinkage(self, cond_cov, shrinkage_factor=(0.2, 0.4)):
"""Stein-type shrinkage of conditional covariance matrices"""
shrinkage_factor = np.array(shrinkage_factor)
# Turn it into 3D to make it broadcastable with cond_cov
shrink_3d = shrinkage_factor[:, np.newaxis, np.newaxis]
term1 = (1-shrink_3d) * cond_cov
# Turn term2 into 3D to make it broadcastable with term3
term2 = (shrinkage_factor * np.trace(cond_cov.T) * 1/self.n_assets) # Shape (n_states,)
term3 = np.broadcast_to(np.identity(self.n_assets)[..., np.newaxis],
(self.n_assets,self.n_assets,self.n_states)).T # Shape (n_states, n_assets, n_assets)
term4 = term2[:, np.newaxis, np.newaxis] * term3
cond_cov = term1 + term4
return cond_cov
def fit_model_get_uncond_dist(self, X, df, n_preds=15, shrinkage_factor=(0.2, 0.4), verbose=False):
"""
        From data, fit the HMM, predict posterior probabilities and return the unconditional distribution.
Wraps model.fit_predict, get_cond_asset_dist and get_uncond_asset_dist methods into one.
Parameters
----------
X : ndarray of shape (n_samples,)
Time series of data
df : DataFrame of shape (n_samples, n_assets)
Historical returns for each asset i.
n_preds : int, default=15
Number of h predictions
verbose : boolean, default=False
Get verbose output
Returns
-------
pred_mu : ndarray of shape (n_preds, n_assets)
Conditional mean value of each assets
pred_cov : ndarray of shape (n_preds, n_assets, n_assets)
Conditional covariance matrix
"""
self.n_assets = df.shape[1]
        # fit model, return decoded historical state sequence and n predictions
# state_sequence is 1D-array with same length as X_rolling
# posteriors is 2D-array with shape (n_preds, n_states)
state_sequence, posteriors = self.model.fit_predict(X, n_preds=n_preds, verbose=verbose)
# Compute conditional mixture distributions in rolling period
cond_mu, cond_cov = \
self.get_cond_asset_dist(df, state_sequence) # shapes (n_states, n_assets), (n_states, n_assets, n_assets)
cond_cov = self.stein_shrinkage(cond_cov, shrinkage_factor=shrinkage_factor)
# Transform into unconditional moments at time t
# Combine with posteriors to also predict moments h steps into future
# shapes (n_preds, n_assets), (n_preds, n_assets, n_assets)
pred_mu, pred_cov = self.get_uncond_asset_dist(posteriors, cond_mu, cond_cov)
return pred_mu, pred_cov, posteriors, state_sequence | python | 15 | 0.615365 | 119 | 41.910615 | 179 |
Class to compute multivariate mixture distributions from n_assets based on a given HMM.
Computes posteriors, state sequences as well as expected and forecasted returns and standard deviations.
Transforms lognormal multivariate distributions into normal distributions and combines them into mixtures.
Parameters
----------
X : ndarray of shape (n_samples,)
        Time series data used to train the HMM.
df : DataFrame of shape (n_samples, n_assets)
        Time series data used when estimating expected returns and covariances.
model : hidden markov model
Hidden Markov Model object.
Attributes
----------
preds : ndarray of shape (n_samples-window_len, n_preds, n_assets)
mean predictions for each asset h time steps into the future at each time t.
cov : ndarray of shape(n_samples-window_len, n_preds, n_assets, n_assets)
predicted covariance matrix h time steps into the future at each time t.
| class |
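The static logcov_to_cov transformation above can be exercised on its own; the log-return moments below are arbitrary illustrative values, and the FinanceHMM class is assumed to be in scope:

import numpy as np
import pandas as pd

log_mu = pd.Series([0.0004, 0.0002])            # hypothetical log-return means for two assets
log_cov = pd.DataFrame([[1.0e-4, 2.0e-5],
                        [2.0e-5, 5.0e-5]])      # hypothetical log-return covariance
mu, cov = FinanceHMM.logcov_to_cov(log_mu, log_cov)
print(mu)    # arithmetic-return means implied by the log-normal assumption
print(cov)   # corresponding covariance matrix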
class Backtester:
"""
Backtester for Hidden Markov Models.
Parameters
----------
Attributes
----------
preds : ndarray of shape (n_samples-window_len, n_preds, n_assets)
mean predictions for each asset h time steps into the future at each time t.
cov : ndarray of shape(n_samples-window_len, n_preds, n_assets, n_assets)
predicted covariance matrix h time steps into the future at each time t.
"""
def __init__(self, window_len=1700):
self.preds = None
self.cov = None
self.n_states = None
self.n_assets = None
self.window_len = window_len
def rolling_preds_cov_from_hmm(self, X, df_logret, model, n_preds=15, window_len=None, shrinkage_factor=(0.3, 0.3), verbose=False):
"""
Backtest based on rolling windows.
Fits a Hidden Markov model within each rolling window and computes the unconditional
multivariate normal mixture distributions for each asset in the defined universe.
Parameters
----------
X : ndarray of shape (n_samples,)
            Log-returns. Time series data used to train the HMM.
df_logret : DataFrame of shape (n_samples, n_assets)
            Log-returns. Time series data used when estimating expected returns and covariances.
model : hidden markov model
Hidden Markov Model object
n_preds : int, default=15
Number of h predictions
window_len : int, default=1500
verbose : boolean, default=False
Make output verbose
Returns
-------
preds : ndarray of shape (n_samples-window_len, n_preds, n_assets)
Unconditional mean values for each asset
cov : ndarray of shape (n_samples-window_len, n_preds, n_assets, n_assets)
Unconditional covariance matrix at each time step t, h steps into future
"""
self.n_states = model.n_states
self.n_assets = df_logret.shape[1]
        if window_len is None: # Ensure class and function window_lens match
window_len = self.window_len
else:
self.window_len = window_len
finance_hmm = FinanceHMM(model) # class for computing asset distributions and predictions.
# Create 3- and 4-D array to store predictions and covariances
self.preds = np.empty(shape=(len(df_logret) - window_len, n_preds, self.n_assets)) # 3-D array
self.cov = np.empty(shape=(len(df_logret) - window_len, n_preds, self.n_assets, self.n_assets)) # 4-D array
self.timestamp = np.empty(shape=len(df_logret) - window_len, dtype=object)
for t in tqdm.trange(window_len, len(df_logret)):
# Slice data into rolling sequences
df_rolling = df_logret.iloc[t-window_len: t]
X_rolling = X.iloc[t-window_len: t]
# fit rolling data with model, return predicted means and covariances, posteriors and state sequence
pred_mu, pred_cov, posteriors, state_sequence = \
finance_hmm.fit_model_get_uncond_dist(
X_rolling, df_rolling, shrinkage_factor=shrinkage_factor, n_preds=n_preds, verbose=verbose)
self.timestamp[t - window_len] = df_rolling.index[-1]
self.preds[t - window_len] = pred_mu
self.cov[t - window_len] = pred_cov
return self.preds, self.cov
def backtest_mpc(self, df_rets, preds, covariances, n_preds=15, port_val=1000,
start_weights=None, max_drawdown=0.4, max_holding_rf=1.,
max_leverage=2.0, gamma_0=5, kappa1=0.008,
rho2=0.0005, rho_rf=0.0001, max_holding=0.4, short_cons="LLO",
rf_included=True, eps=1e-6):
"""
Wrapper for backtesting MPC models on given data and predictions.
Parameters
----------
df_rets : DataFrame of shape (n_samples, n_assets)
Historical returns for each asset i. Cash must be at the last column position.
preds : ndarray of shape (n_samples, n_preds, n_assets)
list of return predictions for each asset h time steps into the future. Each element in list contains,
from time t, predictions h time steps into the future.
covariances : ndarray of shape (n_samples, n_preds, n_assets, n_assets)
list of covariance matrix of returns for each time step t.
port_val : float, default=1000
Starting portfolio value.
start_weights : ndarray of shape (n_assets,)
Current (known) portfolio weights at the start of backtest. Default is 100% allocation to cash.
Cash must be the last column in df_rets.
"""
self.port_val = np.array([0, port_val])
self.port_ret = np.array([1, 1])
self.n_assets = df_rets.shape[1]
self.n_preds = n_preds
df_rets = df_rets.iloc[-len(preds):] # Slice returns to match preds
        if start_weights is None: # Standard init with 100% allocated to cash
start_weights = np.zeros(self.n_assets)
start_weights[-1] = 1.
else:
start_weights = start_weights
self.weights = np.zeros(shape=(len(preds) + 1, self.n_assets)) # len(preds) + 1 to include start weights
self.weights[0] = start_weights
gamma = np.array([]) # empty array
trade_cost, turnover = [], []
# Instantiate MPC object
mpc_solver = MPC(rets=preds[0], covariances=covariances[0], prev_port_vals=self.port_val,
start_weights=self.weights[0], max_drawdown=max_drawdown, gamma_0=gamma_0,
kappa1=kappa1, rho2=rho2, rho_rf=rho_rf, max_holding=max_holding, max_holding_rf=max_holding_rf
,max_leverage=max_leverage, short_cons=short_cons, rf_included=rf_included, eps=eps)
for t in tqdm.trange(preds.shape[0]):
# Update MPC object
mpc_solver.rets = np.array(preds[t])
mpc_solver.cov = np.array(covariances[t])
mpc_solver.start_weights = self.weights[t]
mpc_solver.prev_port_vals = self.port_val
# Solve MPC problem at time t and save weights
weights_mpc = mpc_solver.cvxpy_solver(verbose=False) # ndarray of shape (n_preds, n_assets)
self.weights[t + 1] = weights_mpc[0] # Only use first forecasted weights
gamma = np.append(gamma, mpc_solver.gamma)
delta_weights = self.weights[t] - self.weights[t-1]
# self.weights and df_rets are one shifted to each other. Time periods should match.
gross_ret = (self.weights[t + 1] @ (1 + df_rets.iloc[t]))
shorting_cost = self.short_costs(self.weights[t + 1], rf_return=df_rets.iloc[t, -1])
trans_cost = self.transaction_costs(delta_weights, trans_cost=0.001)
port_ret = (gross_ret-shorting_cost) * (1-trans_cost)
new_port_val = port_ret * self.port_val[-1]
self.port_ret = np.append(self.port_ret, port_ret)
self.port_val = np.append(self.port_val, new_port_val)
trade_cost.append(trans_cost)
turnover.append(np.linalg.norm(delta_weights, ord=1) / 2) # Half L1 norm
self.port_val = self.port_val[1:] # Throw away first observation since it is artificially set to zero
self.port_ret = self.port_ret[2:]
self.gamma = gamma
        # Annualized average trading cost
self.trans_cost = np.array(trade_cost)
self.annual_trans_cost = 252 / len(self.trans_cost) * self.trans_cost.sum()
# Compute average annualized portfolio turnover
self.daily_turnover = np.array(turnover)
self.annual_turnover = 252 / len(self.daily_turnover) * self.daily_turnover.sum()
# Compute return & std.
n_years = len(self.port_val) / 252
annual_ret = self.port_ret.prod()**(1/n_years) - 1
annual_std = self.port_ret.std(ddof=1) * np.sqrt(252)
return annual_ret, annual_std, self.annual_turnover
def gridsearch_mpc(self, grid, df_rets, preds, covariances, n_preds=15, port_val=1000,
start_weights=None, max_drawdown=1000, max_leverage=2.0, gamma_0=5, kappa1=0.008,
rho2=0.0005, max_holding=0.4, short_cons="LO", rf_included=True, eps=1e-6):
results = pd.DataFrame()
for max_holding in grid['max_holding']:
for trans_costs in grid['trans_costs']:
for holding_costs in grid['holding_costs']:
for holding_costs_rf in grid['holding_costs_rf']:
print(f"""Computing grid -- max_holding {max_holding} -- trans_costs {trans_costs} holding_costs {holding_costs} holding_costs_rf {holding_costs_rf}""")
#try:
annual_ret, annual_std, annual_turnover = self.backtest_mpc(
df_rets, preds, covariances, n_preds=n_preds, port_val=port_val,
start_weights=start_weights, max_drawdown=max_drawdown, max_leverage=max_leverage,
gamma_0=gamma_0, kappa1=trans_costs, rho2=holding_costs, rho_rf=holding_costs_rf, max_holding=max_holding,
short_cons=short_cons, rf_included=rf_included, eps=eps
)
results_dict = {'max_holding': max_holding,
'trans_costs': trans_costs,
'holding_costs': holding_costs,
'holding_costs_rf': holding_costs_rf,
'return': annual_ret,
'std': annual_std,
'turnover': annual_turnover}
results = results.append(results_dict, ignore_index=True)
print(results.tail(1))
#except Exception as e:
# print('No convergence')
# print(e)
# continue
self.gridsearch_df = results
return results
def mpc_gammas_shortcons(self, gammas, constraints,
data, preds, covariances, n_preds=15, port_val=1000,
start_weights=None, max_holding_rf=1.,
max_leverage=2.0, trans_costs=0.001,
holding_costs=0.0000, max_holding=0.2, eps=1e-6):
df = pd.DataFrame()
for constr in constraints:
print(f'Backtesting for params {constr}')
results = {f'gamma_{i}': [] for i in gammas}
short_con = constr[0]
max_drawdown = constr[1]
for gamma in gammas:
self.backtest_mpc(data.rets, preds, covariances, n_preds=n_preds, port_val=port_val,
start_weights=start_weights, max_drawdown=max_drawdown, max_leverage=max_leverage,
gamma_0=gamma, kappa1=trans_costs, rho2=holding_costs, max_holding=max_holding,
short_cons=short_con, eps=eps)
results[f'gamma_{gamma}'] = self.port_val
df_temp = pd.DataFrame(results)
df_temp['short_cons'] = short_con
df_temp['D_max'] = max_drawdown
df_temp['timestamp'] = data.rets.index[-len(df_temp):]
df_temp['T-bills rf'] = data.prices['T-bills rf'].iloc[-len(df_temp):].values
df = df.append(df_temp)
# self.annual_turnover, self.annual_trans_cost, self.port_val
self.port_val_df = df
return df
def mpc_shortcons(self, constraints,
data, preds, covariances, n_preds=15, port_val=1000,
start_weights=None, max_holding_rf=1.,
max_leverage=2.0, trans_costs=0.001,
holding_costs=0.0000, max_holding=0.2, eps=1e-6):
df = pd.DataFrame()
results = {f'{constr[0]}_{constr[1]}': [] for constr in constraints}
for constr in constraints:
print(f'Backtesting for params {constr}')
short_con = constr[0]
max_drawdown = constr[1]
self.backtest_mpc(data.rets, preds, covariances, n_preds=n_preds, port_val=port_val,
start_weights=start_weights, max_drawdown=max_drawdown, max_leverage=max_leverage,
gamma_0=5, kappa1=trans_costs, rho2=holding_costs, max_holding=max_holding,
short_cons=short_con, eps=eps)
results[f'{constr[0]}_{constr[1]}'] = self.port_val
df = pd.DataFrame(results)
df['timestamp'] = data.rets.index[-len(df):]
df['T-bills rf'] = data.prices['T-bills rf'].iloc[-len(df):].values
# self.annual_turnover, self.annual_trans_cost, self.port_val
self.port_val_df = df
return df
def backtest_equal_weighted(self, df_rets, rebal_freq='M', port_val=1000, use_weights=None, start_weights=None):
"""
Backtest an equally weighted portfolio, with specified rebalancing frequency.
Parameters
----------
df_rets : DataFrame of shape (n_samples, n_assets)
Historical returns for each asset i. Cash must be at the last column position.
        rebal_freq : str, default='M'
            Rebalance frequency. Default is 'M', i.e. monthly.
port_val : float, default=1000
Starting portfolio value.
start_weights : ndarray of shape (n_assets,)
Current (known) portfolio weights at the start of backtest. Default is 100% allocation to cash.
Cash must be the last column in df_rets.
"""
self.port_val = np.array([0, port_val])
self.n_assets = df_rets.shape[1]
        if use_weights is None:
            use_weights = np.array([1 / self.n_assets] * self.n_assets) # Vector of shape (n_assets,)
        if start_weights is None: # Standard init with 100% allocated to cash
start_weights = np.zeros(self.n_assets)
start_weights[-1] = 1.
else:
start_weights = start_weights
weights = start_weights
trade_cost, turnover = [], []
# Group data into months - average sample size is 20
# Then for each month loop over the daily returns and update weights
# The problem is recursive and thus requires looping done this way
for month_dt, df_group in tqdm.tqdm(df_rets.groupby(pd.Grouper(freq=rebal_freq))):
# Compute transaction costs for each month. Subtracted from gross ret the first of the month
delta_weights = use_weights - weights
trans_cost = self.transaction_costs(delta_weights)
weights = use_weights # Reset weights
for day in range(len(df_group)):
# Calculate gross returns for portfolio and append it
if day == 0:
gross_ret = (1 + df_group.iloc[day]) * (1-trans_cost)
else:
gross_ret = 1 + df_group.iloc[day]
new_port_val = weights @ gross_ret * self.port_val[-1]
self.port_val = np.append(self.port_val, new_port_val)
new_w = gross_ret * weights
new_w /= new_w.sum() # Weights sum to 1
weights = new_w # Update weights each iteration
trade_cost.append(trans_cost)
turnover.append(np.linalg.norm(delta_weights, ord=1) / 2) # Half L1 norm
self.port_val = self.port_val[1:] # Throw away first observation since it is artificially set to zero
        # Annualized average trading cost
self.trans_cost = np.array(trade_cost)
self.annual_trans_cost = 12 / len(self.trans_cost) * self.trans_cost.sum()
# Compute average annualized portfolio turnover
self.monthly_turnover = np.array(turnover)
self.annual_turnover = 12 / len(self.monthly_turnover) * self.monthly_turnover.sum()
def short_costs(self, weights, rf_return):
"""
Compute shorting costs, assuming a fee equal to the risk-free asset is paid.
"""
weights_no_rf = weights[:-1] # Remove risk-free asset from array
short_weights = weights_no_rf[weights_no_rf < 0.0].sum() # Sum of all port weights below 0.0
return -short_weights * rf_return
def transaction_costs(self, delta_weights, trans_cost=0.001):
"""
Compute transaction costs. Assumes no costs in risk-free asset and equal cost to
buying and selling assets.
"""
delta_weights = delta_weights[:-1] # Remove risk-free asset as it doesn't have trading costs
delta_weights = np.abs(delta_weights).sum() # abs since same price for buying/selling
return delta_weights * trans_cost
def asset_metrics(self, df_prices):
"""Compute performance metrics for a given portfolio/asset"""
df_ret = df_prices.pct_change().dropna()
n_years = len(df_ret) / 252
# Get regular cagr and std
ret = df_ret.drop('T-bills rf', axis=1)
cagr = ((1 + ret).prod(axis=0)) ** (1 / n_years) - 1
std = ret.std(axis=0, ddof=1) * np.sqrt(252)
# Compute metrics in excess of the risk-free asset
excess_ret = df_ret.subtract(df_ret['T-bills rf'], axis=0).drop('T-bills rf', axis=1)
excess_cagr = ((1 + excess_ret).prod(axis=0)) ** (1 / n_years) - 1
excess_std = excess_ret.std(axis=0 ,ddof=1) * np.sqrt(252)
sharpe = excess_cagr / excess_std
df_prices = df_prices.drop('T-bills rf', axis=1)
peaks = df_prices.cummax(axis=0)
drawdown = -(df_prices - peaks) / peaks
max_drawdown = drawdown.max(axis=0)
calmar = excess_cagr / max_drawdown
metrics = {'return': cagr,
'std': std,
'excess_return': excess_cagr,
'excess_std': excess_std,
'sharpe': sharpe,
'max_drawdown': max_drawdown,
'calmar_ratio': calmar}
metrics = pd.DataFrame(metrics)
return metrics
def single_port_metric(self, df_prices, port_val, compare_assets=False):
"""Compute performance metrics for a given portfolio/asset"""
# Merge port_val with data
df_prices = df_prices.iloc[-len(port_val):]
df_prices['port_val'] = port_val
df_prices.dropna(inplace=True)
df_ret = df_prices.pct_change().dropna()
# Annual returns, std
n_years = len(port_val) / 252
excess_ret = df_ret['port_val'] - df_ret['T-bills rf']
excess_cagr = ((1+excess_ret).prod())**(1/n_years) - 1
excess_std = excess_ret.std(ddof=1) * np.sqrt(252)
sharpe = excess_cagr / excess_std
# Drawdown
peaks = np.maximum.accumulate(port_val)
drawdown = -(port_val-peaks) / peaks
max_drawdown = np.max(drawdown)
max_drawdown_end = np.argmax(drawdown)
max_drawdown_beg = np.argmax(port_val[:max_drawdown_end])
drawdown_dur = max_drawdown_end - max_drawdown_beg # TODO not showing correct values
calmar = excess_cagr / max_drawdown
metrics = {'excess_return': excess_cagr,
'excess_std': excess_std,
'sharpe': sharpe,
'max_drawdown': max_drawdown,
'max_drawdown_dur': drawdown_dur,
'calmar_ratio': calmar}
return metrics
def mulitple_port_metrics(self, df_port_val):
"""Compute performance metrics for a given portfolio/asset"""
df = pd.DataFrame()
for type, df_groupby in df_port_val.groupby(['short_cons', 'D_max']):
df_prices = df_groupby.drop(columns=['short_cons', 'D_max', 'timestamp'])
df_rets = df_prices.pct_change().dropna()
# Annual returns, std
n_years = len(df_rets) / 252
ret = df_rets.drop('T-bills rf', axis=1)
cagr = ((1 + ret).prod(axis=0)) ** (1 / n_years) - 1
std = ret.std(axis=0, ddof=1) * np.sqrt(252)
excess_ret = df_rets.subtract(df_rets['T-bills rf'], axis=0).drop('T-bills rf', axis=1)
excess_cagr = ((1 + excess_ret).prod(axis=0)) ** (1 / n_years) - 1
excess_std = excess_ret.std(axis=0 ,ddof=1) * np.sqrt(252)
sharpe = excess_cagr / excess_std
df_prices = df_prices.drop('T-bills rf', axis=1)
peaks = df_prices.cummax(axis=0)
drawdown = -(df_prices - peaks) / peaks
max_drawdown = drawdown.max(axis=0)
"""
max_drawdown_end = np.argmax(drawdown, axis=0)
max_drawdown_beg = np.argmax(drawdown[:max_drawdown_end], axis=0)
drawdown_dur = max_drawdown_end - max_drawdown_beg # TODO not showing correct values
"""
calmar = excess_cagr / max_drawdown
metrics = {'return': cagr,
'std': std,
'excess_return': excess_cagr,
'excess_std': excess_std,
'sharpe': sharpe,
'max_drawdown': max_drawdown,
'calmar_ratio': calmar}
df_temp = pd.DataFrame(metrics)
df_temp['short_cons'] = type[0]
df_temp['D_max'] = type[1]
df = df.append(df_temp)
return df
def plot_port_val(self, data, mpc_val, equal_w_val, start=None, savefig=None):
# Prepare data
equal_w_val = equal_w_val[-len(mpc_val):]
data.dropna(inplace=True)
data = data.iloc[-len(mpc_val):]
data['MPC'] = mpc_val
data['1/n'] = equal_w_val
data = data[['MPC', '1/n']] # Drop all other cols
        if start is not None:
data = data.loc[start:]
data = data / data.iloc[0] * 100
# Plotting
plt.rcParams.update({'font.size': 15})
fig, ax = plt.subplots(nrows=1, ncols=1, sharex=True, figsize=(15,10))
ax.plot(data.index, data)
# ax[0].set_yscale('log')
ax.set_ylabel('$P_t$')
plt.tight_layout()
        if savefig is not None:
plt.savefig('./images/' + savefig)
plt.show() | python | 18 | 0.569317 | 176 | 44.842857 | 490 |
Backtester for Hidden Markov Models.
Parameters
----------
Attributes
----------
preds : ndarray of shape (n_samples-window_len, n_preds, n_assets)
mean predictions for each asset h time steps into the future at each time t.
cov : ndarray of shape(n_samples-window_len, n_preds, n_assets, n_assets)
predicted covariance matrix h time steps into the future at each time t.
| class |
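The cost helpers at the bottom of Backtester are easy to check in isolation; the weight vectors below are made up, with the risk-free asset in the last position as the docstrings require, and the class is assumed to be in scope:

import numpy as np

bt = Backtester()
delta_w = np.array([0.10, -0.05, 0.00, -0.05])              # last entry = risk-free leg, ignored for costs
print(bt.transaction_costs(delta_w, trans_cost=0.001))      # 0.00015 = (0.10 + 0.05 + 0.00) * 0.001
print(bt.short_costs(np.array([-0.2, 0.7, 0.5, 0.0]), rf_return=0.0001))  # 2e-05 = 0.2 * 0.0001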
class DriveOpen:
""" Context manager for generically opening drive filepaths
"""
def __init__(self, filepath, mode='wb'):
self.is_drive = (type(filepath) is GoogleDrivePath)
self.drive_path = filepath if self.is_drive else open(filepath, mode=mode)
self.mode = mode
def __enter__(self):
if self.mode == 'rb' and self.is_drive:
self.read_buffer = self.drive_path.read()
return self.read_buffer
return self.drive_path
def __exit__(self, exc_type, exc_value, traceback):
if not self.is_drive:
self.drive_path.close()
elif self.mode == 'rb':
self.read_buffer.close() | python | 12 | 0.592163 | 82 | 33.5 | 20 | Context manager for generically opening drive filepaths
| class |
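A usage sketch for the DriveOpen context manager above; the path and payload are hypothetical, and a plain local path exercises the non-Drive branch (a GoogleDrivePath object would take the Drive branch instead):

with DriveOpen("/tmp/example.bin", mode="wb") as fh:   # hypothetical local file
    fh.write(b"example payload")                       # behaves like a normal open() here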
class NeuralNetwork:
"""
Defines a neural network with one hidden layer to do binary classification
"""
def __init__(self, nx, nodes):
"""
Constructor method
------------------
nx: it's the number of input features to the neuron
nodes: it's the number of nodes found in the hidden layer
W1: The weights vector for the hidden layer. Upon instantiation, it
should be initialized using a random normal distribution.
b1: The bias for the hidden layer. Upon instantiation, it should be
initialized with 0’s.
A1: The activated output for the hidden layer. Upon instantiation, it
should be initialized to 0.
W2: The weights vector for the output neuron. Upon instantiation, it
should be initialized using a random normal distribution.
b2: The bias for the output neuron. Upon instantiation, it should be
initialized to 0.
A2: The activated output for the output neuron (prediction). Upon
instantiation, it should be initialized to 0.
"""
if type(nx) is not int:
raise TypeError('nx must be an integer')
if nx < 1:
raise ValueError('nx must be a positive integer')
if type(nodes) is not int:
raise TypeError('nodes must be an integer')
if nodes < 1:
raise ValueError('nodes must be a positive integer')
self.W1 = np.random.randn(nodes, nx)
self.b1 = np.zeros((nodes, 1))
self.A1 = 0
self.W2 = np.random.randn(1, nodes)
self.b2 = 0
self.A2 = 0 | python | 11 | 0.605583 | 78 | 41.282051 | 39 |
Defines a neural network with one hidden layer to do binary classification
| class |
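A quick instantiation sketch for the NeuralNetwork class above (only the constructor is defined here, so the example just confirms the shapes; the sizes are arbitrary):

import numpy as np

nn = NeuralNetwork(nx=784, nodes=16)     # e.g. 784 input features, 16 hidden nodes
print(nn.W1.shape, nn.b1.shape)          # (16, 784) (16, 1)
print(nn.W2.shape, nn.b2, nn.A2)         # (1, 16) 0 0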
class Range:
"""Immutable representation of PostgreSQL `range` type."""
__slots__ = '_lower', '_upper', '_lower_inc', '_upper_inc', '_empty'
def __init__(self, lower=None, upper=None, *,
lower_inc=True, upper_inc=False,
empty=False):
self._empty = empty
if empty:
self._lower = self._upper = None
self._lower_inc = self._upper_inc = False
else:
self._lower = lower
self._upper = upper
self._lower_inc = lower is not None and lower_inc
self._upper_inc = upper is not None and upper_inc
@property
def lower(self):
return self._lower
@property
def lower_inc(self):
return self._lower_inc
@property
def lower_inf(self):
return self._lower is None and not self._empty
@property
def upper(self):
return self._upper
@property
def upper_inc(self):
return self._upper_inc
@property
def upper_inf(self):
return self._upper is None and not self._empty
@property
def isempty(self):
return self._empty
def _issubset_lower(self, other):
if other._lower is None:
return True
if self._lower is None:
return False
return self._lower > other._lower or (
self._lower == other._lower
and (other._lower_inc or not self._lower_inc)
)
def _issubset_upper(self, other):
if other._upper is None:
return True
if self._upper is None:
return False
return self._upper < other._upper or (
self._upper == other._upper
and (other._upper_inc or not self._upper_inc)
)
def issubset(self, other):
if self._empty:
return True
if other._empty:
return False
return self._issubset_lower(other) and self._issubset_upper(other)
def issuperset(self, other):
return other.issubset(self)
def __bool__(self):
return not self._empty
def __eq__(self, other):
if not isinstance(other, Range):
return NotImplemented
return (
self._lower,
self._upper,
self._lower_inc,
self._upper_inc,
self._empty
) == (
other._lower,
other._upper,
other._lower_inc,
other._upper_inc,
other._empty
)
def __hash__(self):
return hash((
self._lower,
self._upper,
self._lower_inc,
self._upper_inc,
self._empty
))
def __repr__(self):
if self._empty:
desc = 'empty'
else:
if self._lower is None or not self._lower_inc:
lb = '('
else:
lb = '['
if self._lower is not None:
lb += repr(self._lower)
if self._upper is not None:
ub = repr(self._upper)
else:
ub = ''
if self._upper is None or not self._upper_inc:
ub += ')'
else:
ub += ']'
desc = '{}, {}'.format(lb, ub)
return '<Range {}>'.format(desc)
__str__ = __repr__ | python | 15 | 0.484264 | 74 | 23.772059 | 136 | Immutable representation of PostgreSQL `range` type. | class |
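A short behavioural sketch of the Range value type above (illustrative only; assumes the class is in scope):

r1 = Range(1, 10)                        # [1, 10) -- lower inclusive, upper exclusive by default
r2 = Range(0, None, lower_inc=False)     # (0, +infinity)
print(r1.issubset(r2))                   # True
print(Range(empty=True).issubset(r1))    # True: the empty range is a subset of everything
print(repr(r1))                          # <Range [1, 10)>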
class Barcode:
"""
A class used to represent a barcode.
Attributes
----------
info : str
decoded barcode value
type : int
a type of barcode (e.g. EAN-13)
points : numpy.array
vertices of barcode rectangle
Methods
-------
Draw(image)
Draws barcode's rectangle and its value to the given image.
"""
def __init__(self, binfo, btype, points):
self.info = binfo
self.type = btype
self.points = points
def __str__(self):
return str(self.info) + " " + str(self.type)
def Draw(self, image):
p1 = np.array(self.points[0], dtype=int)
p2 = np.array(self.points[2], dtype=int)
cv2.rectangle(image, p1, p2, (255, 0, 0))
cv2.putText(image, "{}".format(self.info), p2, cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 0, 0), 1, cv2.LINE_AA) | python | 11 | 0.557471 | 114 | 26.21875 | 32 |
A class used to represent a barcode.
Attributes
----------
info : str
decoded barcode value
type : int
a type of barcode (e.g. EAN-13)
points : numpy.array
vertices of barcode rectangle
Methods
-------
Draw(image)
Draws barcode's rectangle and its value to the given image.
| class |
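An illustrative sketch of the Barcode class above drawing onto a blank image; the decoded value and corner points are made up:

import numpy as np
import cv2

img = np.zeros((200, 300, 3), dtype=np.uint8)
pts = [(50, 50), (250, 50), (250, 150), (50, 150)]   # hypothetical corners; indices 0 and 2 are opposite
code = Barcode("4006381333931", "EAN-13", pts)
code.Draw(img)        # draws the rectangle and the decoded value onto img
print(code)           # "4006381333931 EAN-13"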
class Colour:
"""
The colour class - used to unify all representations of colour as needed
by third-party modules.
This class also switches the colour around to fit the theme of the code jam.
Parameters
----------
colour: int or str
The colour inputted (given by the text box Entry)
All examples are with the Colour initialised with Colour("15715755")
Attributes
----------
fake_colour: str
The colour in hex before reformatting
e.g. "efcdab"
r: str
The amount of red in hex format.
e.g. "ab"
g: str
The amount of green in hex format.
e.g. "cd"
b: str
The amount of blue in hex format.
e.g. "ef"
colour: str
The colour in hex after the format is switched.
e.g. "abcdef"
as_hex: str
The colour prefixed with #
This is the most common way to represent a colour, and the main one
used by TK/TCL.
e.g. "#abcdef"
as_int: int
The colour in an integer with the hex converted into denary.
e.g. 11259375
as_rgb: tuple[int]
The colour in an (r, g, b) tuple.
e.g. (171, 205, 239)
Methods
-------
from_rgb: classmethod
Creates class from an (r, g, b) tuple.
"""
def __init__(self, colour: typing.Union[str, int]):
try:
int(colour)
except ValueError:
raise TypeError
if int(colour) not in range(16_777_216):
raise ValueError
self.fake_colour = hex(int(colour))[2:]
self.fake_colour = "0" * (6 - len(self.fake_colour)) + self.fake_colour
self.b = self.fake_colour[0:2]
self.g = self.fake_colour[2:4]
self.r = self.fake_colour[4:6]
self.colour = self.r + self.g + self.b
self.as_hex = "#" + self.colour
self.as_int = int(self.colour, 16)
@property
def as_rgb(self):
return (int(self.r, 16), int(self.g, 16), int(self.b, 16))
@classmethod
def from_rgb(cls, colour: typing.Tuple[int, int, int]):
        r, g, b = map(lambda x: f'{x:02x}', colour)  # zero-pad so channels below 16 keep two hex digits
fake = b + g + r
fake_int = int(fake, 16)
return cls(fake_int) | python | 14 | 0.559695 | 80 | 27.576923 | 78 |
The colour class - used to unify all representations of colour as needed
by third-party modules.
This class also switches the colour around to fit the theme of the code jam.
Parameters
----------
colour: int or str
The colour inputted (given by the text box Entry)
All examples are with the Colour initialised with Colour("15715755")
Attributes
----------
fake_colour: str
The colour in hex before reformatting
e.g. "efcdab"
r: str
The amount of red in hex format.
e.g. "ab"
g: str
The amount of green in hex format.
e.g. "cd"
b: str
The amount of blue in hex format.
e.g. "ef"
colour: str
The colour in hex after the format is switched.
e.g. "abcdef"
as_hex: str
The colour prefixed with #
This is the most common way to represent a colour, and the main one
used by TK/TCL.
e.g. "#abcdef"
as_int: int
The colour in an integer with the hex converted into denary.
e.g. 11259375
as_rgb: tuple[int]
The colour in an (r, g, b) tuple.
e.g. (171, 205, 239)
Methods
-------
from_rgb: classmethod
Creates class from an (r, g, b) tuple.
| class |
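The worked values in the Colour docstring above can be reproduced directly (illustrative sketch, assuming the class is in scope):

c = Colour("15715755")       # 0xefcdab before the byte order is switched
print(c.as_hex)              # "#abcdef"
print(c.as_int)              # 11259375
print(c.as_rgb)              # (171, 205, 239)
print(Colour.from_rgb((171, 205, 239)).as_hex)   # round-trips back to "#abcdef"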
class WhatsappSession:
"""Wrapper around the Whatsapp class to remember state and do background scraping"""
def __init__(self, n_chats=2):
self.started_time = time.time()
self.w = Whatsapp(screenshot_folder="/tmp")
self._last_qr: str = None
self.links = None
self.lock = threading.Lock()
self._thread: Thread = None
self.status: str = "NOTSTARTED"
self._progress: int = None
self._message: str = None
self.n_chats: int = n_chats
def get_qr(self) -> str:
"""Go to whatsapp web and get the QR code"""
self._last_qr = self.w.get_qr()
return self._last_qr
def get_qr_status(self) -> dict:
"""Check if the user logged in and/or if a new QR code is displayed"""
if self.w.is_qr_scanned():
return {"status": "READY"}
try:
qr = self.w.get_qr()
except TimeoutException:
# Check if the app was loading the ready screen and is ready now
if self.w.is_qr_scanned():
return {"status": "READY"}
raise
if qr == self._last_qr:
return {"status": "WAITING"}
else:
self._last_qr = qr
return {"status": "REFRESH", "qr": qr}
def do_scrape(self):
logging.info("Starting scraper")
with self.lock:
if self.links is not None:
raise ValueError("Scraping already in progress")
self.links = []
self.status = "STARTED"
self._progress = 0
try:
self._do_scrape()
except Exception as e:
logging.exception("Error in scraper thread")
with self.lock:
self.status = "ERROR"
self._message = str(e)
self._progress = 0
else:
logging.info("Done!")
with self.lock:
self.status = "DONE"
self._message = f"Done, found {len(self.links)} in total"
self._progress = 100
finally:
self.w.quit_browser()
self.w = None
def _do_scrape(self):
time.sleep(3)
for i, chat in enumerate(self.w.get_all_chats()):
if i >= self.n_chats:
break
msg = f"Scraping contact {i + 1}/{self.n_chats}: {chat.text} [{len(self.links)} links found so far]"
logging.info(msg)
with self.lock:
self._progress = round(i * 100 / self.n_chats)
self._message = msg
links = list(self.w.get_links_per_chat(chat))
with self.lock:
self.links += links
def get_progress(self):
with self.lock:
return dict(status=self.status, progress=self._progress, message=self._message)
def start_scraping(self):
self._thread = threading.Thread(target=self.do_scrape)
logging.info("Starting thread")
self._thread.start() | python | 17 | 0.523762 | 112 | 34.833333 | 84 | Wrapper around the Whatsapp class to remember state and do background scraping | class |
class TODModeSet:
"""
The TODModeSet combines three pieces of information:
- det_uid, a (n_det,) array.
- weights, an (n_det,n_modes) array.
- modes, an (n_modes,n_samp) array.
"""
def __init__(self, det_uid, shape=None, dtype=None):
self.det_uid = det_uid
if shape is not None:
if len(shape) != 2:
raise ValueError('Expected shape=(n_modes, n_samp)')
self.modes = np.zeros(shape, dtype)
self.weights = np.zeros((len(self.det_uid), self.modes.shape[0]))
@classmethod
def from_fits_file(cls, filename):
def extract_table(sdb, keyfmt, dtype=None):
count = 0
while True:
if (keyfmt % count) not in sdb.dtype.names:
break
count += 1
if dtype is None:
dtype = sdb[keyfmt % 0].dtype
output = np.zeros((count, len(sdb)), dtype)
for i in range(count):
output[i,:] = sdb[keyfmt%i]
return output
data1 = moby2.util.StructDB.from_fits_table(filename, index=1)
data2 = moby2.util.StructDB.from_fits_table(filename, index=2)
self = cls(det_uid=data1['det_uid'])
self.weights = extract_table(data1, 'weight%i').transpose()
self.modes = extract_table(data2, 'mode%i')
return self
def to_fits_file(self, filename=None):
prihdr = fits.Header()
n_modes, n_samp = self.modes.shape
prihdr['n_modes'] = n_modes
prihdu = fits.PrimaryHDU(header=prihdr)
tb0 = moby2.util.StructDB.from_data(
[('det_uid', self.det_uid)] + [
('weight%i'%i, self.weights[:,i]) for i in range(n_modes)]
).to_fits_table()
tb1 = moby2.util.StructDB.from_data(
[('mode%i'%i, self.modes[i]) for i in range(n_modes)]
).to_fits_table()
hdulist = fits.HDUList([prihdu, tb0, tb1])
if filename is not None:
hdulist.writeto(filename, clobber=True)
return hdulist
@classmethod
def from_hdf(cls, target):
cls.check_class(target, 'tod_modeset', 1)
self = cls(det_uid=target['det_uid'])
self.weights = np.array(target['weights'])
self.modes = np.array(target['modes'])
return self
def to_hdf(self, target):
kw = {'compression': 'gzip'}
target.create_dataset('det_uid', data=self.det_uid.astype('uint32'), **kw)
target.create_dataset('weights', data=self.weights.astype('float32'), **kw)
target.create_dataset('modes', data=self.modes.astype('float32'), **kw)
        self.set_class(target, 'tod_modeset', 1)
def get_tod(self, dets=None, dtype=None, mode_idx=None):
"""
Return weights dot modes for the desired dets.
"""
if dets is None:
dets = list(range(0, self.weights.shape[0]))
if mode_idx is None:
mode_idx = list(range(0, len(self.modes)))
if np.asarray(dets).ndim == 0:
return np.dot(self.weights[dets,mode_idx], self.modes[mode_idx])
output = np.empty((len(dets), len(self.modes[0])), dtype=dtype)
for j,i in enumerate(dets):
output[j,:] = np.dot(self.weights[i,mode_idx], self.modes[mode_idx])
return output
def remove_modes(self, target, dets=None):
if dets is None:
dets = range(0, self.weights.shape[0])
amps = np.array(np.transpose(self.weights), order='C')
if self.modes.dtype == np.float64:
moby2.libactpol.remove_modes64(
target, np.array(dets).astype('int32'), self.modes, amps)
elif self.modes.dtype == np.float32:
moby2.libactpol.remove_modes(
target, np.array(dets).astype('int32'), self.modes, amps)
else:
raise ValueError('Fast mode removal only supported for '
'self.modes.dtype float32 and float64.') | python | 16 | 0.561311 | 83 | 40.635417 | 96 |
The TODModeSet combines three pieces of information:
- det_uid, a (n_det,) array.
- weights, an (n_det,n_modes) array.
- modes, an (n_modes,n_samp) array.
| class |
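A small numeric sketch of the TODModeSet container above, using synthetic shapes and no FITS/HDF I/O (the class is assumed to be in scope):

import numpy as np

det_uid = np.arange(3)
ms = TODModeSet(det_uid, shape=(2, 100))        # 2 modes, 100 samples
ms.modes[:] = np.random.standard_normal((2, 100))
ms.weights[:] = [[1.0, 0.0],
                 [0.0, 2.0],
                 [0.5, 0.5]]
tod = ms.get_tod()       # weights dot modes for every detector
print(tod.shape)         # (3, 100)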
class AnnotatedSpan:
"""
An HTML-like annotation applied to a span of offsets.
The label is the primary label to be applied to the region.
Additionally, key-value metadata (attributes) can be applied.
When rendered as HTML, the primary label will become the tag and the metadata will
    become attributes.
"""
label: str = attrib(validator=instance_of(str))
span: Span = attrib(validator=instance_of(Span))
attributes: Mapping[str, str] = attrib(
default=immutabledict(), converter=immutabledict
)
@staticmethod
def create_div_of_class(span: Span, clazz: str) -> "AnnotatedSpan":
return AnnotatedSpan(DIV, span, {"class": clazz})
@staticmethod
def create_span_of_class(span: Span, clazz: str) -> "AnnotatedSpan":
return AnnotatedSpan(SPAN, span, {"class": clazz}) | python | 12 | 0.682353 | 86 | 34.458333 | 24 |
An HTML-like annotation applied to a span of offsets.
The label is the primary label to be applied to the region.
Additionally, key-value metadata (attributes) can be applied.
When rendered as HTML, the primary label will become the tag and the metadata will
become attributes.
| class |
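A hypothetical rendering sketch of the HTML behaviour described above: the label becomes the tag and the key-value metadata become HTML attributes. The helper below is plain Python (no attrs or immutabledict) and is not part of the real class.
def open_tag(label: str, attributes: dict) -> str:
    # Build an opening tag such as <div class="sentence"> from a label and attributes.
    rendered = "".join(f' {key}="{value}"' for key, value in attributes.items())
    return f"<{label}{rendered}>"

print(open_tag("div", {"class": "sentence"}))   # <div class="sentence">
print(open_tag("span", {"class": "token"}))     # <span class="token">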
class ProgressBar:
"""Implement a console progress bar into a processing loop.
Args:
total_values (int, optional): Total number of iterations.
Defaults to 25.
bar_len (int, optional): Complete length of the progress bar, in chars.
Defaults to 25
symbol (str, optional): The symbol which is used to track progress.
Defaults to ``'.'``.
color (str, optional): Colour of the progress bar; where only the first
letter of the colour is required.
Options are: red, green, yellow, blue, magenta, cyan, white.
Defaults to 'w' (white).
:Design:
This is a simple console progress bar which should be called
**inside** a processing loop.
On instantiation, you can pass in the bar colour, length and
symbol parameters if you want to configure the appearance a
little bit.
:Colour Options:
red, green, yellow, blue, magenta, cyan, white
:Example:
You might implement the progress bar in a loop like this::
>>> import time
>>> from utils4.progressbar import ProgressBar
>>> pb = ProgressBar(total_values=25,
bar_len=25,
symbol='#',
color='red')
        >>> for i in range(26):
>>> # < some processing >
>>> pb.update_progress(current=i)
>>> # Optional pause to see updates.
>>> time.sleep(.1)
Processing 25 of 25 [ ......................... ] 100% Complete
"""
def __init__(self, total_values: int=25, bar_len: int=25, symbol: str='.', color: str='w'):
"""Progress bar class initialiser."""
self._total = total_values
self._bar_len = bar_len
self._symbol = symbol
self._color = color
self._len = len(str(self._total))
self._rst = '\x1b[0m'
self._clr = self._getcolor()
def update_progress(self, current: int): # pragma: nocover
"""Incrementally update the progress bar.
Args:
current (int): Index value for the current iteration.
This value is compared against the initialised ``total_values``
parameter to determine the current position in the overall
progress.
:Example:
Refer to the :class:`~ProgressBar` class docstring.
"""
# Calculate percent complete.
percent = float(current) / self._total
# Number of ticks.
ticks = self._symbol * int(round(percent * self._bar_len))
# Number of space placeholders.
spaces = ' ' * (self._bar_len - len(ticks))
msg = (f'{self._clr}'
f'\rProcessing {str(current).zfill(self._len)} of {self._total} [ {ticks+spaces} ] '
f'{percent*100:.0f}% Complete{self._rst}')
sys.stdout.write(msg)
sys.stdout.flush()
def _getcolor(self) -> str:
"""Create ANSI colour escape sequence to user's colour.
Returns:
str: ANSI escape sequence string for the user's colour.
"""
clrs = {'r': 31, 'g': 32, 'y': 33, 'b': 34, 'm': 35, 'c': 36, 'w': 37}
seq = f'\033[{clrs.get(self._color[0])};40m'
return seq | python | 15 | 0.541047 | 99 | 35.16129 | 93 | Implement a console progress bar into a processing loop.
Args:
total_values (int, optional): Total number of iterations.
Defaults to 25.
bar_len (int, optional): Complete length of the progress bar, in chars.
Defaults to 25
symbol (str, optional): The symbol which is used to track progress.
Defaults to ``'.'``.
color (str, optional): Colour of the progress bar; where only the first
letter of the colour is required.
Options are: red, green, yellow, blue, magenta, cyan, white.
Defaults to 'w' (white).
:Design:
This is a simple console progress bar which should be called
**inside** a processing loop.
On instantiation, you can pass in the bar colour, length and
symbol parameters if you want to configure the appearance a
little bit.
:Colour Options:
red, green, yellow, blue, magenta, cyan, white
:Example:
You might implement the progress bar in a loop like this::
>>> import time
>>> from utils4.progressbar import ProgressBar
>>> pb = ProgressBar(total_values=25,
bar_len=25,
symbol='#',
color='red')
>>> for i in range(26):
>>> # < some processing >
>>> pb.update_progress(current=i)
>>> # Optional pause to see updates.
>>> time.sleep(.1)
Processing 25 of 25 [ ......................... ] 100% Complete
| class |
class CommandStack:
"""
Stack of command tokens that can be navigated forward and backward with undo/redo
"""
stack = list()
nextIndex = 0
maxIndex = 0
@staticmethod
def setTaskTree(taskTree):
"""
Set the database on which commands will act
"""
CommandStack.taskTree = taskTree
@staticmethod
def push(token, inredo):
"""
Add a new command token to the top of the stack
"""
CommandStack.nextIndex += 1
if inredo == False:
CommandStack.stack.insert(CommandStack.nextIndex - 1, token)
CommandStack.maxIndex = CommandStack.nextIndex
@staticmethod
def pop():
"""
Remove a command token from the top of the stack and return it
"""
token = CommandStack.stack[CommandStack.nextIndex - 1]
CommandStack.nextIndex -= 1
return token
@staticmethod
def undo():
"""
Roll back the previous command if possible. Return 'True' if possible.
"""
if CommandStack.nextIndex == 0:
return False
else:
CommandStack.pop().undo()
return True
@staticmethod
def redo():
"""
Go forward from a previously undone command if possible. Return 'True' if possible.
"""
if CommandStack.nextIndex == CommandStack.maxIndex:
return False
else:
CommandStack.stack[CommandStack.nextIndex].execute(True)
return True | python | 14 | 0.574823 | 91 | 23.730159 | 63 |
Stack of command tokens that can be navigated forward and backward with undo/redo
| class |
class TodoCommand:
"""
Class for 'todo' commands in todoshell
"""
def __init__(self, task):
self.task = task
def execute(self, inredo=False):
"""
Execute this command
"""
self.label = CommandStack.taskTree.insertTask(self.task)
CommandStack.push(self, inredo)
def undo(self):
"""
Undo this command
"""
CommandStack.taskTree.deleteTask(self.label) | python | 10 | 0.559211 | 64 | 18.869565 | 23 |
Class for 'todo' commands in todoshell
| class |
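An illustrative round trip through the undo/redo stack above, assuming the CommandStack and TodoCommand classes are available in scope. FakeTaskTree is hypothetical; it stubs only the two methods these commands call.
class FakeTaskTree:
    def __init__(self):
        self.tasks = {}
        self._next_label = 0
    def insertTask(self, task, parent_label=None):
        self._next_label += 1
        self.tasks[self._next_label] = task
        return self._next_label          # label of the newly inserted task
    def deleteTask(self, label):
        return self.tasks.pop(label)     # "trace" of the removed task

CommandStack.setTaskTree(FakeTaskTree())
TodoCommand("write docs").execute()      # inserts the task and pushes the command
CommandStack.undo()                      # deletes the task again
CommandStack.redo()                      # re-inserts it via the stored command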
class TodosubCommand:
"""
Class for 'todosub' commands in todoshell
"""
def __init__(self, task, parentLabel):
self.task = task
self.parentLabel = parentLabel
def execute(self, inredo=False):
"""
Execute this command
"""
self.label = CommandStack.taskTree.insertTask(self.task, self.parentLabel)
CommandStack.push(self, inredo)
def undo(self):
"""
Undo this command
"""
CommandStack.taskTree.deleteTask(self.label) | python | 10 | 0.588346 | 82 | 21.208333 | 24 |
Class for 'todosub' commands in todoshell
| class |
class DoneCommand:
"""
Class for 'done' commands in todoshell
"""
def __init__(self, label):
self.label = label
def execute(self, inredo=False):
"""
Execute this command
"""
CommandStack.taskTree.setDone(self.label)
CommandStack.push(self, inredo)
def undo(self):
"""
Undo this command
"""
CommandStack.taskTree.setUndone(self.label) | python | 9 | 0.555305 | 51 | 18.304348 | 23 |
Class for 'done' commands in todoshell
| class |
class RemoveCommand:
"""
Class for 'remove' commands in todoshell
"""
def __init__(self, label):
self.label = label
def execute(self, inredo=False):
"""
Execute this command
"""
self.trace = CommandStack.taskTree.deleteTask(self.label)
CommandStack.push(self, inredo)
def undo(self):
"""
Undo this command
"""
CommandStack.taskTree.insertTrace(self.trace) | python | 10 | 0.567742 | 65 | 19.26087 | 23 |
Class for 'remove' commands in todoshell
| class |
class MoveUpCommand:
"""
Class for 'move up' commands in todoshell
"""
def __init__(self, label):
self.label = label
def execute(self, inredo=False):
"""
Execute this command
"""
self.newLabel = CommandStack.taskTree.moveTaskUp(self.label)
CommandStack.push(self, inredo)
def undo(self):
"""
Undo this command
"""
CommandStack.taskTree.moveTaskDown(self.newLabel) | python | 10 | 0.572939 | 68 | 19.608696 | 23 |
Class for 'move up' commands in todoshell
| class |
class MoveDownCommand:
"""
Class for 'move down' commands in todoshell
"""
def __init__(self, label):
self.label = label
def execute(self, inredo=False):
"""
Execute this command
"""
self.newLabel = CommandStack.taskTree.moveTaskDown(self.label)
CommandStack.push(self, inredo)
def undo(self):
"""
Undo this command
"""
CommandStack.taskTree.moveTaskUp(self.newLabel) | python | 10 | 0.57652 | 70 | 19.782609 | 23 |
Class for 'move down' commands in todoshell
| class |
class MoveTopCommand:
"""
Class for 'move top' commands in todoshell
"""
def __init__(self, label):
self.label = label
def execute(self, inredo=False):
"""
Execute this command
"""
(self.newLabel, self.oldPosition) = CommandStack.taskTree.moveTask(self.label, 1)
CommandStack.push(self, inredo)
def undo(self):
"""
Undo this command
"""
CommandStack.taskTree.moveTask(self.newLabel, self.oldPosition) | python | 10 | 0.584314 | 89 | 21.217391 | 23 |
Class for 'move top' commands in todoshell
| class |
class MoveBottomCommand:
"""
Class for 'move bottom' commands in todoshell
"""
def __init__(self, label):
self.label = label
def execute(self, inredo=False):
"""
Execute this command
"""
# self.newLabel = CommandStack.taskTree.moveTaskBottom(self.label)
# CommandStack.push(self, inredo)
def undo(self):
"""
Undo this command
""" | python | 8 | 0.555814 | 74 | 19.52381 | 21 |
Class for 'move bottom' commands in todoshell
| class |
class Resolver:
"""Resolves system delta, validates system configuration."""
_LOG = logging.getLogger(__name__)
def __init__(self, *, resolvers_map: Dict[str, component.Resolver]):
self._resolvers_map = resolvers_map
self._validators = [
validate.NameConventionValidator(),
validate.NameUniquenessValidator()]
def load_checked_delta(self, target: model.Spec) -> model.Delta:
target_descriptions = self._get_descriptions(target)
assert len(target_descriptions) == len(target.specs)
self._validate_target(target_descriptions)
current = self.load_current()
delta = self._build_delta(current, target)
self._check_dependencies(
current=current, target_descriptions=target_descriptions)
self._order_delta(delta)
return delta
def load_current(self) -> model.Spec:
spec = model.Spec(specs=[])
for resolver in self._resolvers_map.values():
for name in resolver.system_list():
spec.specs.append(resolver.system_get(name))
return spec
def _get_descriptions(self, target: model.Spec) -> List[model.Description]:
descriptions = list()
for spec in target.specs:
try:
assert spec.resource_type in self._resolvers_map, \
f"Resource type [{spec.resource_type}] does not have a corresponding registered resolver"
description = self._resolvers_map[spec.resource_type].describe(spec)
descriptions.append(description)
except Exception as e:
raise ValueError(f"Could not describe resource [{spec.full_name()}]: {str(e)}")
return descriptions
def _validate_target(self, target_descriptions: List[model.Description]) -> None:
for validator in self._validators:
validator.validate_target(descriptions=target_descriptions)
self._check_schema(target_descriptions)
def _check_schema(self, descriptions: List[model.Description]) -> None:
schemas = self._load_schemas(descriptions)
for description in descriptions:
if description.spec.schema_name:
expected = schemas.get(description.spec.schema_name)
if not expected:
raise ValueError(f"Resource [{description.spec.full_name}] "
f"requires schema [{description.spec.schema_name}] which is not defined.")
# TODO: Should compare different order of elements.
if description.schema != expected:
raise ValueError(f"Resource [{description.spec.full_name}] schema mismatch. "
f"Expected [{expected}], actual [{description.schema}].")
def _load_schemas(self, descriptions: List[model.Description]) -> Dict[str, model.SchemaParams]:
schemas: Dict[str, model.SchemaParams] = {}
for desc in descriptions:
if desc.spec.resource_type == model.RESOURCE_SCHEMA:
assert desc.spec.name not in schemas, f"Duplicated schema name [{desc.spec.name}]"
schemas[desc.spec.name] = desc.schema
return schemas
def _check_dependencies(self, *,
current: model.Spec,
target_descriptions: List[model.Description]) -> None:
# TODO Consider resources added or removed with delta
# TODO Stream and Table checks for UDFs
for desc in target_descriptions:
for dep in desc.depends:
found = False
for curr in current.specs:
if curr.resource_type == dep.resource_type \
and curr.name.lower() == dep.name.lower():
found = True
break
if not found:
                    raise ValueError(f"Resource {desc.spec.resource_type.capitalize()} [{desc.spec.name}] "
                                     f"depends on {dep.resource_type.capitalize()} [{dep.name}] "
                                     f"which was not found in the system")
def _order_delta(self, delta: model.Delta) -> None:
orders = {
model.RESOURCE_TOPIC: 1,
model.RESOURCE_SCHEMA: 2,
model.RESOURCE_SOURCE: 3,
model.RESOURCE_TABLE: 4,
model.RESOURCE_STREAM: 5,
model.RESOURCE_SINK: 6}
tuples = list()
for item in delta.items:
pos = orders.get(item.resource_type)
assert pos, f"Order position not defined for {item.resource_type}"
tuples.append((pos, item))
tuples = sorted(tuples, key=lambda x: x[0])
delta.items = [item[1] for item in tuples]
def _build_delta(self, current: model.Spec, target: model.Spec) -> model.Delta:
# System can have multiple items with the same name but different types.
current_map: Dict[str, List[model.SpecItem]] = {}
for spec in current.specs:
if spec.name.lower() in current_map:
current_map[spec.name.lower()].append(spec)
else:
current_map[spec.name.lower()] = [spec]
delta = model.Delta(items=[])
for target_spec in target.specs:
found = False
if target_spec.name.lower() in current_map:
for current_item in current_map.get(target_spec.name.lower()):
if current_item.resource_type == target_spec.resource_type:
found = True
resolver = self._resolvers_map[target_spec.resource_type]
if not resolver.equals(current_item, target_spec):
self._LOG.info(f"{target_spec.resource_type} [{target_spec.name}] changes")
delta.items.append(model.DeltaItem(
deleted=False,
resource_type=target_spec.resource_type,
current=current_item,
target=target_spec))
else:
self._LOG.info(f"{target_spec.resource_type} [{target_spec.name}] remains the same")
break
if not found:
self._LOG.info(f"{target_spec.resource_type} [{target_spec.name}] is new")
delta.items.append(model.DeltaItem(
deleted=False,
resource_type=target_spec.resource_type,
current=None,
target=target_spec))
return delta | python | 22 | 0.562016 | 112 | 47.266187 | 139 | Resolves system delta, validates system configuration. | class |
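A standalone sketch of the ordering rule applied in _order_delta above: delta items are sorted by a fixed position per resource type so that, for example, topics and schemas are handled before the streams that use them. The resource-type strings are illustrative.
orders = {"topic": 1, "schema": 2, "source": 3, "table": 4, "stream": 5, "sink": 6}
items = ["stream", "topic", "table", "schema"]
ordered = sorted(items, key=lambda resource_type: orders[resource_type])
print(ordered)  # ['topic', 'schema', 'table', 'stream']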
class Config:
'''
General configuration parent class
'''
NEWS_API_BASE_URL ='https://newsapi.org/v2/top-headlines?country={}&apiKey=dbfa40f35ae24c188d04adfd4ebbd2a3'
NEWS_API_KEY = 'dbfa40f35ae24c188d04adfd4ebbd2a3'
NEWS_API_SEARCH_URL = 'https://newsapi.org/v2/everything?q={}&apiKey=dbfa40f35ae24c188d04adfd4ebbd2a3'
NEWS_API_SOURCE_URL = 'https://newsapi.org/v2/sources?apiKey=dbfa40f35ae24c188d04adfd4ebbd2a3'
TOP_HEADLINES_URL = 'https://newsapi.org/v2/top-headlines?sources={}&sortBy=latest&apiKey=dbfa40f35ae24c188d04adfd4ebbd2a3' | python | 6 | 0.760984 | 127 | 62.333333 | 9 |
General configuration parent class
| class |
class ScrapedRootCertificateRecord:
"""A root certificate subject name and fingerprint scraped from a list of root records (Apple's, MSFT, etc.).
It needs to be validated and sanitized by the RootRecordsValidator before we can do anything with it.
"""
def __init__(
self, subject_name: str, fingerprint: bytes, fingerprint_hash_algorithm: Union[hashes.SHA1, hashes.SHA256]
) -> None:
self.subject_name = subject_name
self.fingerprint = fingerprint
self.fingerprint_hash_algorithm = fingerprint_hash_algorithm | python | 11 | 0.711744 | 114 | 45.916667 | 12 | A root certificate subject name and fingerprint scraped from a list of root records (Apple's, MSFT, etc.).
It needs to be validated and sanitized by the RootRecordsValidator before we can do anything with it.
| class |
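A hypothetical construction of a scraped record, assuming the class above is in scope; the subject name and fingerprint bytes are made up for illustration.
from cryptography.hazmat.primitives import hashes

record = ScrapedRootCertificateRecord(
    subject_name="Example Trusted Root CA",
    fingerprint=bytes.fromhex("aa" * 32),        # 32 bytes, i.e. a SHA-256-sized digest
    fingerprint_hash_algorithm=hashes.SHA256(),
)
print(record.subject_name, record.fingerprint_hash_algorithm.name)  # ... sha256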
class TestMain:
"""Unit tests for main() function."""
def test_instantiate_worker(self, m_worker):
m_worker().cli = Mock(name="cli")
download_fvcom_results.main()
args, kwargs = m_worker.call_args
assert args == ("download_fvcom_results",)
assert list(kwargs.keys()) == ["description"]
def test_init_cli(self, m_worker):
m_worker().cli = Mock(name="cli")
download_fvcom_results.main()
m_worker().init_cli.assert_called_once_with()
def test_add_host_name_arg(self, m_worker):
m_worker().cli = Mock(name="cli")
download_fvcom_results.main()
args, kwargs = m_worker().cli.add_argument.call_args_list[0]
assert args == ("host_name",)
assert "help" in kwargs
def test_add_model_config_arg(self, m_worker):
m_worker().cli = Mock(name="cli")
download_fvcom_results.main()
args, kwargs = m_worker().cli.add_argument.call_args_list[1]
assert args == ("model_config",)
assert kwargs["choices"] == {"r12", "x2"}
assert "help" in kwargs
def test_add_run_type_arg(self, m_worker):
m_worker().cli = Mock(name="cli")
download_fvcom_results.main()
args, kwargs = m_worker().cli.add_argument.call_args_list[2]
assert args == ("run_type",)
expected = {"nowcast", "forecast"}
assert kwargs["choices"] == expected
assert "help" in kwargs
def test_add_run_date_arg(self, m_worker):
m_worker().cli = Mock(name="cli")
download_fvcom_results.main()
args, kwargs = m_worker().cli.add_date_option.call_args_list[0]
assert args == ("--run-date",)
assert kwargs["default"] == arrow.now().floor("day")
assert "help" in kwargs
def test_run_worker(self, m_worker):
m_worker().cli = Mock(name="cli")
download_fvcom_results.main()
args, kwargs = m_worker().run.call_args
assert args == (
download_fvcom_results.download_fvcom_results,
download_fvcom_results.success,
download_fvcom_results.failure,
) | python | 13 | 0.585594 | 71 | 37.196429 | 56 | Unit tests for main() function. | class |
class TestConfig:
"""Unit tests for production YAML config file elements related to worker."""
def test_message_registry(self, prod_config):
assert "download_fvcom_results" in prod_config["message registry"]["workers"]
msg_registry = prod_config["message registry"]["workers"][
"download_fvcom_results"
]
assert msg_registry["checklist key"] == "VHFR FVCOM results files"
@pytest.mark.parametrize(
"msg",
(
"success x2 nowcast",
"failure x2 nowcast",
"success x2 forecast",
"failure x2 forecast",
"success r12 nowcast",
"failure r12 nowcast",
"crash",
),
)
def test_message_types(self, msg, prod_config):
msg_registry = prod_config["message registry"]["workers"][
"download_fvcom_results"
]
assert msg in msg_registry
def test_run_types_section(self, prod_config):
run_types = prod_config["vhfr fvcom runs"]["run types"]
assert run_types["nowcast x2"] == {
"nemo boundary results": "/nemoShare/MEOPAR/SalishSea/nowcast/",
"time step": 0.5,
"results": "/nemoShare/MEOPAR/SalishSea/fvcom-nowcast-x2/",
}
assert run_types["forecast x2"] == {
"nemo boundary results": "/nemoShare/MEOPAR/SalishSea/forecast/",
"time step": 0.5,
"results": "/nemoShare/MEOPAR/SalishSea/fvcom-forecast-x2/",
}
assert run_types["nowcast r12"] == {
"nemo boundary results": "/nemoShare/MEOPAR/SalishSea/nowcast/",
"time step": 0.2,
"results": "/nemoShare/MEOPAR/SalishSea/fvcom-nowcast-r12/",
}
def test_results_archive_section(self, prod_config):
results_archive = prod_config["vhfr fvcom runs"]["results archive"]
assert results_archive["nowcast x2"] == "/opp/fvcom/nowcast-x2/"
assert results_archive["forecast x2"] == "/opp/fvcom/forecast-x2/"
assert results_archive["nowcast r12"] == "/opp/fvcom/nowcast-r12/" | python | 12 | 0.585551 | 85 | 40.27451 | 51 | Unit tests for production YAML config file elements related to worker. | class |
class TestSuccess:
"""Unit tests for success() function."""
def test_success(self, m_logger, model_config, run_type):
parsed_args = SimpleNamespace(
host_name="arbutus.cloud",
model_config=model_config,
run_type=run_type,
run_date=arrow.get("2018-02-16"),
)
msg_type = download_fvcom_results.success(parsed_args)
assert m_logger.info.called
assert msg_type == f"success {model_config} {run_type}" | python | 13 | 0.600406 | 63 | 37 | 13 | Unit tests for success() function. | class |
class TestFailure:
"""Unit tests for failure() function."""
def test_failure(self, m_logger, model_config, run_type):
parsed_args = SimpleNamespace(
host_name="arbutus.cloud",
model_config=model_config,
run_type=run_type,
run_date=arrow.get("2018-02-16"),
)
msg_type = download_fvcom_results.failure(parsed_args)
assert m_logger.critical.called
assert msg_type == f"failure {model_config} {run_type}" | python | 13 | 0.603622 | 63 | 37.307692 | 13 | Unit tests for failure() function. | class |
class TestDownloadFVCOMResults:
"""Unit tests for download_fvcom_results() function."""
def test_checklist(
self, m_fix_perms, m_run_sub, m_logger, model_config, run_type, config
):
parsed_args = SimpleNamespace(
host_name="arbutus.cloud",
model_config=model_config,
run_type=run_type,
run_date=arrow.get("2018-02-16"),
)
checklist = download_fvcom_results.download_fvcom_results(parsed_args, config)
expected = {
run_type: {
"host": "arbutus.cloud",
"model config": model_config,
"run date": "2018-02-16",
"files": [],
}
}
assert checklist == expected
def test_scp_subprocess(
self, m_fix_perms, m_run_sub, m_logger, model_config, run_type, config
):
parsed_args = SimpleNamespace(
host_name="arbutus.cloud",
model_config=model_config,
run_type=run_type,
run_date=arrow.get("2018-02-16"),
)
download_fvcom_results.download_fvcom_results(parsed_args, config)
m_run_sub.assert_called_once_with(
shlex.split(
f"scp -Cpr "
f"arbutus.cloud:/nemoShare/MEOPAR/SalishSea/fvcom-{run_type}-{model_config}/16feb18 "
f"/opp/fvcom/{run_type}-{model_config}"
),
m_logger.debug,
m_logger.error,
) | python | 13 | 0.533602 | 101 | 34.452381 | 42 | Unit tests for download_fvcom_results() function. | class |
class MultitaskGatherTarget:
"""Gather the targets for multitask heads.
Args:
pipeline_list (list[list]): List of pipelines for all heads.
pipeline_indices (list[int]): Pipeline index of each head.
"""
def __init__(self,
pipeline_list,
pipeline_indices=None,
keys=('target', 'target_weight')):
self.keys = keys
self.pipelines = []
for pipeline in pipeline_list:
self.pipelines.append(Compose(pipeline))
if pipeline_indices is None:
self.pipeline_indices = list(range(len(pipeline_list)))
else:
self.pipeline_indices = pipeline_indices
def __call__(self, results):
# generate target and target weights using all pipelines
pipeline_outputs = []
for pipeline in self.pipelines:
pipeline_output = pipeline(results)
pipeline_outputs.append(pipeline_output.copy())
for key in self.keys:
result_key = []
for ind in self.pipeline_indices:
result_key.append(pipeline_outputs[ind].get(key, None))
results[key] = result_key
return results | python | 15 | 0.584298 | 71 | 34.617647 | 34 | Gather the targets for multitask heads.
Args:
pipeline_list (list[list]): List of pipelines for all heads.
pipeline_indices (list[int]): Pipeline index of each head.
| class |
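A minimal sketch of the gathering behaviour, with plain callables standing in for the composed per-head pipelines (the real class wraps pipeline configs in Compose). The key names follow the default keys=('target', 'target_weight').
def heatmap_pipeline(results):
    out = dict(results)
    out["target"], out["target_weight"] = "heatmap_target", "heatmap_weight"
    return out

def regression_pipeline(results):
    out = dict(results)
    out["target"], out["target_weight"] = "reg_target", "reg_weight"
    return out

results = {"image": "..."}
outputs = [p(dict(results)) for p in (heatmap_pipeline, regression_pipeline)]
# One entry per head, in pipeline_indices order (here the default 0, 1):
results["target"] = [o["target"] for o in outputs]
results["target_weight"] = [o["target_weight"] for o in outputs]
print(results["target"])  # ['heatmap_target', 'reg_target']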
class SetupTaskArguments:
"""
Organisation setup arguments.
"""
directions: Optional[Configuration] = None
"""
Non-interactive directions. Intended only for testing.
"""
configuration_loader: Optional[ConfigurationLoader] = None
log_level: str = "CRITICAL"
regions: Optional[List[str]] = None | python | 12 | 0.673653 | 62 | 24.769231 | 13 |
Organisation setup arguments.
| class |
class Quote:
"""Quote class to define quotes object"""
    def __init__(self, author, id, quote, permalink):
self.author = author
self.id = id
self.quote = quote
self.permalink = permalink | python | 8 | 0.661692 | 47 | 24.25 | 8 | Quote class to define quotes object | class |
class SimulationResult:
"""Results from oogeso simulation
The results are stored in a set of multi-index Series, with
index names indicating what they are:
device - device identifier
node - node identifier
edge - edge identifier
carrier - network type ("el", "gas", "oil", "water", "hydrogen", "heat")
terminal - input/output ("in" or "out"),
time (integer timestep)
"""
# Input/output flow per device and network type:
device_flow: Optional[pd.Series] = None
# Device startup preparation status (boolean):
device_is_prep: Optional[pd.Series] = None
# Device on/off status (boolean):
device_is_on: Optional[pd.Series] = None
# Device starting status (boolean):
device_starting: Optional[pd.Series] = None
# Device stopping status (boolean):
device_stopping: Optional[pd.Series] = None
# Energy storage filling level (Sm3 or MJ)
device_storage_energy: Optional[pd.Series] = None
# Max available "flow" (power/fluid) from storage (Sm3/s or MW):
device_storage_pmax: Optional[pd.Series] = None
    # Device associated penalty rate (PENALTY_UNIT/s):
penalty: Optional[pd.Series] = None
# Flow rate (Sm3/s or MW):
edge_flow: Optional[pd.Series] = None
# Loss rate (MW) - only relevant for energy flow (el and heat):
edge_loss: Optional[pd.Series] = None
    # Voltage angle at node - only relevant for electricity flow computed via dc-pf:
el_voltage_angle: Optional[pd.Series] = None
# Pressure at node (MPa):
terminal_pressure: Optional[pd.Series] = None
# Direct flow between in and out terminal of node - relevant if there is no device inbetween:
terminal_flow: Optional[pd.Series] = None
# Emission rate (sum of all devices) (kgCO2/s):
co2_rate: Optional[pd.Series] = None
# Emission rate per device (kgCO2/s):
co2_rate_per_dev: Optional[pd.Series] = None
# Revenue rate for exported oil/gas (CURRENCY/s):
export_revenue: Optional[pd.Series] = None
# CO2 intensity of exported oil/gas (kgCO2/Sm3oe):
co2_intensity: Optional[pd.Series] = None
# Available online electrical reserve capacity (MW):
el_reserve: Optional[pd.Series] = None
# Available online electrical backup per device (MW):
el_backup: Optional[pd.Series] = None
# Value of duals (associated with constraints)
duals: Optional[pd.Series] = None
# Time-series profiles used in simulation (copied from the input)
profiles_forecast: Optional[pd.DataFrame] = None
profiles_nowcast: Optional[pd.DataFrame] = None
def append_results(self, sim_res):
        exclude_list = ["profiles_forecast", "profiles_nowcast"]  # skip the profile DataFrames when concatenating
for my_field in fields(self):
field_name = my_field.name
if field_name not in exclude_list:
my_df = getattr(self, field_name)
other_df = getattr(sim_res, field_name)
if other_df is not None:
setattr(self, field_name, pd.concat([my_df, other_df]).sort_index()) | python | 19 | 0.665573 | 97 | 44.492537 | 67 | Results from oogeso simulation
The results are stored in a set of multi-index Series, with
index names indicating what they are:
device - device identifier
node - node identifier
edge - edge identifier
carrier - network type ("el", "gas", "oil", "water", "hydrogen", "heat")
terminal - input/output ("in" or "out"),
time (integer timestep)
| class |
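A hedged sketch of the multi-index layout the docstring describes, using pandas only; the device name, carrier and values are illustrative.
import pandas as pd

index = pd.MultiIndex.from_tuples(
    [("gasturbine1", "el", "out", 0), ("gasturbine1", "el", "out", 1)],
    names=["device", "carrier", "terminal", "time"],
)
device_flow = pd.Series([12.5, 13.1], index=index)
# Select one device/carrier/terminal and get a series over time:
print(device_flow.loc[("gasturbine1", "el", "out")])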
class Helper: # pragma: no cover
"""
Helper(): helper functions for custom decks
"""
@staticmethod
def custom_suits_values_1():
"""
custom_suits_values_1():
        sample custom deck to be used in tests
"""
# returns (suits_ranking, values_ranking)
return (
[
'Diamonds',
'Hearts',
],
[
'10',
'Jack',
'Queen',
'King',
]
)
@staticmethod
def custom_suits_values_2():
"""
custom_suits_values_2():
sample custom deck to be used in tests
"""
# returns (suits_ranking, values_ranking)
return (
[
'Sith',
'Jedi',
],
[
'Youngling',
'Padawan',
'Knight',
'Guardian',
'Master',
]
)
@staticmethod
def create_deck_manager(*args, **kwargs):
"""
create_deck_manager(): must be implemented by classes
that inherit this class
"""
raise NotImplementedError
@staticmethod
def normal_deck_suits():
"""
normal_deck_suits(): returns list of normal deck suits
"""
return [
'Spades',
'Diamonds',
'Hearts',
'Clubs',
]
@staticmethod
def normal_deck_values():
"""
normal_deck_values():
returns list of normal deck values
"""
return [
'2',
'3',
'4',
'5',
'6',
'7',
'8',
'9',
'10',
'Jack',
'Queen',
'King',
'Ace',
]
@staticmethod
def generate_player_names(count):
"""
generate_player_names(): generates list of player names
"""
return [
f"Player{x}" for x in range(1, count+1)
] | python | 12 | 0.389751 | 63 | 21.4 | 95 |
Helper(): helper functions for custom decks
| class |
class SequentialFitnessCaller:
"""
Fitness caller used for sequential implementation of NMMSO algorithm.
"""
def __init__(self):
self.problem = None
self.data = []
def set_problem(self, problem):
"""
Sets the problem object to use to calculate the fitness.
Arguments
---------
problem
Problem object implementing the fitness method.
"""
self.problem = problem
def add(self, location, userdata):
"""
Add a location to be evaluated.
Arguments
---------
location : numpy array
Location to be evaluated.
userdata
Userdata to be returned with the evaluation result.
"""
self.data.append((location, userdata))
def evaluate(self):
"""
Evaluates all the locations.
Returns
-------
list of (location, value, userdate) tuples
Tuples containing the location, value and corresponding user data
"""
result = []
for location, userdata in self.data:
value = self.problem.fitness(location)
result.append((location, value, userdata))
self.data = []
return result
def finish(self):
"""
Terminates the fitness caller.
"""
pass | python | 12 | 0.54267 | 77 | 22.655172 | 58 |
Fitness caller used for sequential implementation of NMMSO algorithm.
| class |
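Illustrative use of the fitness caller above, assuming the class is in scope. SphereProblem is hypothetical; it only needs the fitness(location) method the caller relies on.
import numpy as np

class SphereProblem:
    def fitness(self, location):
        return float(np.sum(location ** 2))

caller = SequentialFitnessCaller()
caller.set_problem(SphereProblem())
caller.add(np.array([0.0, 1.0]), userdata="swarm-0")
caller.add(np.array([2.0, 2.0]), userdata="swarm-1")
for location, value, userdata in caller.evaluate():
    print(userdata, value)   # swarm-0 1.0, then swarm-1 8.0
caller.finish()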
class RouterV2:
"""
API version 2 - HTTP Router class.
"""
def __init__(self, context: Context):
self._context = context
self._app = web.Application(middlewares=[self.middleware])
self._public = RouterV2Public(context)
self._app.add_subapp(u.public, self._public.app)
self._admin = RouterV2Admin(context)
self._app.add_subapp(u.admin, self._admin.app)
self._dev = RouterV2Dev(context)
self._app.add_subapp(u.dev, self._dev.app)
self._self = RouterV2Self(context)
self._app.add_subapp(u.self, self._self.app)
self._main = RouterV2Main(context)
self._app.add_subapp(u.main, self._main.app)
self._plugins = RouterV2Plugins(context)
self._app.add_subapp(u.plugins, self._plugins.app)
@property
def app(self) -> web.Application:
return self._app
@property
def context(self) -> Context:
return self._context
@staticmethod
def is_admin_router(request: Request) -> bool:
return request.path.startswith(u.api_v2_admin)
@staticmethod
def is_dev_router(request: Request) -> bool:
return request.path.startswith(u.api_v2_dev)
@staticmethod
def is_main_router(request: Request) -> bool:
return request.path.startswith(u.api_v2_main)
@staticmethod
def is_public_router(request: Request) -> bool:
return request.path.startswith(u.api_v2_public)
@staticmethod
def is_self_router(request: Request) -> bool:
return request.path.startswith(u.api_v2_self)
@staticmethod
def is_plugins_router(request: Request) -> bool:
return request.path.startswith(u.api_v2_plugins)
async def test_initialized_database(self) -> None:
if not await self.context.is_initialized_database():
raise HTTPReccUninitializedService
async def assign_session(self, request: Request) -> None:
try:
await assign_session(self.context, request)
except BaseException as e:
logger.exception(e)
raise HTTPReccAccessTokenError
@staticmethod
def has_admin_privileges(request: Request) -> bool:
assert c.session in request
session = request[c.session]
assert isinstance(session, SessionEx)
return session.is_admin
async def test_admin_privileges(self, request: Request) -> None:
if not self.has_admin_privileges(request):
raise HTTPForbidden(reason="Administrator privileges are required")
def test_developer_config(self) -> None:
if not self.context.config.developer:
raise HTTPServiceUnavailable(reason="Developer mode is not enabled")
# -----------------------------
# Middleware of the sub-routers
# -----------------------------
async def middleware_admin(self, request: Request, handler) -> Response:
await self.test_initialized_database()
await self.assign_session(request)
await self.test_admin_privileges(request)
return await handler(request)
async def middleware_dev(self, request: Request, handler) -> Response:
await self.test_initialized_database()
await self.assign_session(request)
await self.test_admin_privileges(request)
self.test_developer_config()
return await handler(request)
async def middleware_main(self, request: Request, handler) -> Response:
await self.test_initialized_database()
await self.assign_session(request)
return await handler(request)
async def middleware_public(self, request: Request, handler) -> Response:
assert self is not None, "Remove warning about 'method may be static'"
return await handler(request)
async def middleware_self(self, request: Request, handler) -> Response:
await self.test_initialized_database()
await self.assign_session(request)
return await handler(request)
async def middleware_plugins(self, request: Request, handler) -> Response:
await self.test_initialized_database()
await self.assign_session(request)
return await handler(request)
@web.middleware
async def middleware(self, request: Request, handler) -> Response:
if request.method == METH_OPTIONS:
return await handler(request) # (CORS) Default `options` handling.
if self.is_admin_router(request):
return await self.middleware_admin(request, handler)
elif self.is_dev_router(request):
return await self.middleware_dev(request, handler)
elif self.is_main_router(request):
return await self.middleware_main(request, handler)
elif self.is_public_router(request):
return await self.middleware_public(request, handler)
elif self.is_self_router(request):
return await self.middleware_self(request, handler)
elif self.is_plugins_router(request):
return await self.middleware_plugins(request, handler)
else:
raise HTTPNotFound() | python | 13 | 0.649696 | 80 | 35.485714 | 140 |
API version 2 - HTTP Router class.
| class |
class Session:
"""Store session variables for a limited time period.
The only functionality this class directly supports is calling an event
handler when the instance is destroyed. Session objects given to a
SessionManager are automatically cleared out of memory when their "time to
live" is exceeded. The manager must be started for such functionality."""
def __init__(self, time_to_live, on_destroyed=None):
"""Initialize timeout setting and deletion handler."""
self.__time_to_live = time_to_live
self.__on_destroyed = on_destroyed
self.wakeup()
def wakeup(self):
"""Refresh the last-accessed time of this session object.
This method is automatically called by the class initializer.
Instances also get a wakeup call when retrieved from a manager."""
self.__time = time.time()
def __bool__(self):
"""Calculate liveliness of object for manager."""
return time.time() - self.__time <= self.__time_to_live
def __del__(self):
"""Call deletion event handler if present.
Completely optional: an on_destroyed handler may be specified
when the object is created. Exception handling is non-existent."""
if self.__on_destroyed is not None:
self.__on_destroyed() | python | 10 | 0.664145 | 78 | 39.090909 | 33 | Store session variables for a limited time period.
The only functionality this class directly supports is calling an event
handler when the instance is destroyed. Session objects given to a
SessionManager are automatically cleared out of memory when their "time to
live" is exceeded. The manager must be started for such functionality. | class |
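A small usage sketch for the Session class above; the timings are illustrative and the __del__ callback is best-effort (CPython reference counting).
import time

session = Session(time_to_live=0.2, on_destroyed=lambda: print("session destroyed"))
print(bool(session))   # True: still within its time to live
time.sleep(0.3)
print(bool(session))   # False: a manager would now discard it
session.wakeup()
print(bool(session))   # True again after the access refresh
del session            # triggers the on_destroyed handler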
class Number:
"""
A class used to get information about a number.
...
Methods
--------
is_prime(n)
Checks if number is prime and returns True/False.
        After 10 seconds times out and raises exception.
is_palindromic(n)
Checks if the number is palindromic (Reads the same from beginning and end).
After 5 seconds times out and raises exception.
is_square(n)
Checks if number is square (It's square root is integer).
After 5 seconds times out and raises exception.
is_triangle(n)
Checks if number is triangle.
After 5 seconds times out and raises exception.
get_factors(n)
Gets the factors of n.
get_divisors_check_semiprime_check_perfect(n)
        Gets all n's divisors, checks if n is semiprime and if n is perfect.
check_primality(n)
Checks if number is prime and handles eventual exception.
get_number_systems(n)
Gets n in binary, hexadecimal, octal and decimal systems.
check_additional(n)
Gets additional information about n - is_square, is_triangle, is_palindromic, is taxicab
and handles exceptions.
compare_speed(n)
Compares n to the speed of light and sound.
check_roots(n)
Check if n is a power of any number in range <2;11>.
check_bus(n)
        Checks if n is a bus number. If so, returns the list of stops of this bus (in Poland).
check_year(n)
Assumes that n is a year and checks if it's leap.
check_phone(n)
        Assumes that n is a phone number and checks which country it is from.
check_number(n)
Checks if n is a positive integer.
run(n)
Runs all of the above and returns all the information as a dictionary.
"""
def resource_path(self, relative_path):
base_path = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))
return os.path.join(base_path, relative_path)
def __init__(self):
self._smallprimeset = set(self._prime_generator(10000))
self._smallprimeset_n = 10000
# dictionary comes from - https://gist.github.com/Goles/3196253
f = open(self.resource_path('phone_numbers.json'), )
self._phone_data = json.load(f)
f.close()
self.divisors_tab = []
self.perfect = False
self.semiprime = False
self.semi_prime_factors = []
self.factors = []
def _prime_generator(self, n):
"""Returns an array of first n prime numbers
Parameters
----------
n : int
The number of prime numbers to return
Returns
---------
list
A list of first n prime numbers
"""
it = Iterator()
primes = []
for i in range(0, n):
prime = it.next_prime()
primes.append(prime)
return primes
@Timeout(10)
def is_prime(self, n, precision=30):
"""Checks if number is prime
Function uses Miller-Rabin primality test.
Algorithm is not 100% accurate. It can return a composite number as a prime,
but not the opposite.
Parameters
-----------
n : int
The number to check if it's prime
precision : int, optional
A number of repetitions of the algorithm. The higher the number the more precise is the result.
Raises
-------
TimeoutError
If the function has been running for 10 seconds
ValueError
If the number is not a positive integer
Returns
---------
True
If the number is prime
False
If the number isn't prime
"""
# http://en.wikipedia.org/wiki/Miller-Rabin_primality_test#Algorithm_and_running_time
try:
n = abs(int(n))
except ValueError:
pass
if n in [0, 1]:
return False
elif n in [2, 3, 5]:
return True
elif n % 2 == 0:
return False
elif n < self._smallprimeset_n:
return n in self._smallprimeset
# Miller-Rabin primality test
d = n - 1
s = 0
while d % 2 == 0:
d //= 2
s += 1
for repeat in range(precision):
a = random.randrange(2, n - 2)
x = pow(a, d, n)
if x == 1 or x == n - 1: continue
for r in range(s - 1):
x = pow(x, 2, n)
if x == 1: return False
if x == n - 1: break
else:
return False
return True
@Timeout(20)
def _get_divisors_semiprime_perfect(self, n):
"""Gets all n's divisors, checks if n is semiprime and checks if n is perfect.
It saves results in self.divisors_tab, self.semiprime and self.perfect
Parameters
-----------
n : int
The number to get it's divisors
Raises
-------
TimeoutError
If the function has been running for 20 seconds
Returns
--------
True
"""
i = 1
suma = -n
while i * i <= n:
if n % i == 0:
d = n // i
self.divisors_tab.append(i)
self.divisors_tab.append(d)
suma += i + d
if not self.semiprime:
if self.is_prime(i) and self.is_prime(d):
self.semiprime = True
self.semi_prime_factors = [i, d]
i += 1
if suma == n:
self.perfect = True
self.divisors_tab = sorted(list(set(self.divisors_tab)))
return True
@Timeout(5)
def is_palindromic(self, n):
""" Checks if number is palindromic.
Parameters
-----------
n : int
The number to check
Raises
------
TimeoutError
If has been running for 5 seconds
Return
-------
True
If is palindromic
False
If isn't palindromic
"""
n = list(str(n))
first_half = n[:len(n) // 2]
second_half = n[math.ceil(len(n) / 2):]
if first_half == list(reversed(second_half)):
return True
return False
@Timeout(5)
def is_square(self, n):
""" Checks if number is square.
Parameters
-----------
n : int
The number to check
Raises
------
TimeoutError
If has been running for 5 seconds
Return
-------
True
If is square
False
If isn't square or if number is bigger than 999999999999999
"""
if n > 999999999999999:
return False
element = math.sqrt(n)
if math.floor(element) == math.ceil(element):
return True
return False
@Timeout(5)
def is_triangle(self, n):
""" Checks if number is triangle.
Parameters
-----------
n : int
The number to check
Raises
------
TimeoutError
If has been running for 5 seconds
Return
-------
True
If is triangle
False
If isn't triangle or if number is bigger than 999999999999999
"""
if n > 999999999999999:
return False
delta = 1 - 4 * (-2 * n)
if delta < 0:
return False
x1 = (-1 - math.sqrt(delta)) / 2
x2 = (-1 + math.sqrt(delta)) / 2
if x1 >= 0 and math.ceil(x1) == math.floor(x1):
return True
elif x2 >= 0 and math.ceil(x2) == math.floor(x2):
return True
return False
@Timeout(15)
def _get_factors(self, n):
"""Gets factors of a number.
Parameters
-----------
n : int
A number to get its factors
Raises
-------
TimeoutError
If function is running for 15 seconds
Returns
--------
True
"""
d = 2
while n > 1:
if n != 0 and n % d == 0:
self.factors.append(d)
n //= d
else:
d += 1
return True
def get_factors(self, n):
"""Wrapper for _get_factors function handles exception
Parameters
-----------
n : int
A number to get its factors
Returns
--------
True
If function didn't timeout
False
If function timed out
"""
try:
self._get_factors(n)
except TimeoutError:
return False
return True
def get_divisors_check_semiprime_check_perfect(self, n):
"""Gets n's divisors, checks if n is semiprime and perfect.
It saves results in self.divisors_tab, self.semiprime and self.perfect.
Parameters
-----------
n : int
A number to get it's divisors and check if it's semiprime
Returns
--------
True
If didn't time out
False
If timed out
"""
try:
self._get_divisors_semiprime_perfect(n)
except TimeoutError:
self.divisors_tab = sorted(self.divisors_tab)
return False
return True
def check_primality(self, n):
"""Check if number n is prime
Parameters
----------
n: int
Number to check if it's prime
Returns
--------
dictionary
A dictionary with keys:
prime
True if number is prime
primality_timeout
True if function timed out after 10 seconds
"""
try:
if self.is_prime(n):
return {'prime': True}
else:
return {'prime': False}
except TimeoutError:
return {'prime': False, 'primality_timeout': True}
def get_number_systems(self, n):
"""Gets n in binary, hexadecimal, octal and decimal systems.
Parameters
-----------
n : int
Number to convert
Returns
--------
dictionary
Dictionary with n in binary, hexadecimal, octal and decimal systems.
"""
dec_n = int(n)
hex_n = hex(n)
oct_n = oct(n)
bin_n = bin(n)
return {"dec": dec_n, "hex": hex_n, "oct": oct_n, "bin": bin_n}
def check_additional(self, n):
"""Check if number is taxicab, palindromic, square or triangle.
Parameters
-----------
n : int
Number to get information about
Returns
--------
dictionary
A dictionary containing keys: taxicab, palindromic, square, triangle.
Values are True if number is given type or False if it isn't.
"""
taxicab_numbers = [2, 1729, 87539319, 6963472309248, 48988659276962496]
data = {'taxicab': False, 'palindromic': False, 'square': False, 'triangle': False}
if n in taxicab_numbers:
data['taxicab'] = True
try:
if self.is_palindromic(n):
data['palindromic'] = True
if self.is_triangle(n):
data['triangle'] = True
if self.is_square(n):
data['square'] = True
except TimeoutError:
data['additional_timeout'] = True
return data
return data
def compare_speed(self, n):
"""Compares n m/s to speed of light and speed of sound
Parameters
-----------
n : int
Number to compare
Returns
---------
dictionary
A dictionary with keys: light, sound.
"""
LIGHT_SPEED = 299792458
SOUND_SPEED = 340
compare_light = "{:.8f}".format(n / LIGHT_SPEED)
compare_sound = "{:.8f}".format(n / SOUND_SPEED)
return {'light': compare_light, 'sound': compare_sound}
def check_roots(self, n):
"""Check if number is a power of any number in range <2;11>
Parameters
----------
n : int
The number to check
Returns
--------
False
If n isn't a power of any of the numbers in range
list
A list of objects:
{
'number': A number that n is a power of
'power': A power of the number
}
"""
if n > 9999999999999999:
return False
data = []
for i in range(2, 12):
tmp = n
is_power = True
count = 0
while tmp != 1:
count += 1
tmp /= i
if math.ceil(tmp) != math.floor(tmp):
is_power = False
break
if is_power:
data.append({'number': i, 'power': count})
return data
def check_bus(self, n):
"""Function checks if n is a bus number and
        if n is a bus number it returns all of its stops.
Parameters
----------
n : int
Number to check
Returns
--------
list
A list with all stops of the bus (in Poland)
If number is not a bus number it returns an empty list
"""
BUS_URL = "https://rj.metropoliaztm.pl/rozklady/1-{}/"
if n <= 998:
url = BUS_URL.format(n)
response = requests.get(url)
data = response.text
if "404 Wybrana strona nie istnieje" in data:
return []
soup = BeautifulSoup(data, features='html.parser')
stops = soup.find_all('a', {'class': 'direction-list-group-item'})
special_chars = [260, 262, 280, 321, 323, 211, 346, 377, 379]
final_list = []
for stop in stops[:math.ceil(len(stops) / 2)]:
text = stop.text.replace("\n", "").replace(" ", "").replace("\t", "").replace(chr(160), "")
i = 0
shift = 0
for char in text:
if (64 < ord(char) < 91 or ord(char) in special_chars) and i != 0:
text = text[:i + shift] + " " + text[i + shift:]
shift += 1
elif 47 < ord(char) < 58:
text = text[:i + shift]
i += 1
final_list.append(text)
return final_list
else:
return []
def check_year(self, n):
"""Function assumes that number is a year and checks if it's a leap year.
Parameters
-----------
n : int
Number (year) to check
Returns
--------
True
If the year is leap
False
If the year isn't leap
"""
if n > 0 and n % 4 == 0 and n % 100 != 0 or n % 400 == 0:
return True
return False
    def check_phone(self, n):
        """Function assumes that n is a phone number and checks which country it is from
Parameters
-----------
n : int
Number (phone number) to check
Returns
--------
dictionary
A dictionary with name, dial_code, code and flag of the country
"""
if 999999999999999 >= n >= 1000000000:
n = str(n)
for o in self._phone_data:
dial_code = str(o['dial_code']).replace('+', "")
if dial_code in n[:len(dial_code)] and len(n[len(dial_code):]) <= 9:
return o
return {}
def check_number(self, n):
"""Checks if number is a positive integer
Parameters
-----------
n : string
Number to check (In string format)
Returns
--------
True
If number is a positive integer
False
If the number isn't a positive integer
"""
if not n.isnumeric():
print('You have to input a number')
return False
if int(n) < 0:
print("Please type in a positive integer")
return False
return True
def run(self, number):
"""Function that gets all of the information about the number.
It runs all of the above functions in 6 different threads.
Max execution time is 20 seconds.
Parameters
-----------
number : int, str
A number to get information about
Returns
-------
dictionary
A dictionary with all of the gathered data
"""
if not self.check_number(str(number)):
return
number = int(number)
divisors = ThreadWithReturn(target=self.get_divisors_check_semiprime_check_perfect, args=(number,), name="divisors")
divisors.start()
primality = ThreadWithReturn(target=self.check_primality, args=(number,), name="primality")
primality.start()
bus = ThreadWithReturn(target=self.check_bus, args=(number,), name="bus")
bus.start()
phone = ThreadWithReturn(target=self.check_phone, args=(number,), name="phone")
phone.start()
factors = ThreadWithReturn(target=self.get_factors, args=(number,), name="factors")
factors.start()
data = {'timeouts': []}
data.update(self.get_number_systems(number))
data.update(self.compare_speed(number))
data.update(self.check_additional(number))
data['roots'] = self.check_roots(number)
data['year'] = self.check_year(number)
if 'additional_timeout' in data:
data['timeouts'].append('additional')
del data['additional_timeout']
data.update(primality.join())
if 'primality_timeout' in data:
data['timeouts'].append('primality')
            del data['primality_timeout']
if data['prime']:
for thread in threading.enumerate():
if thread.name == '_get_divisors_ext':
time.sleep(0.001)
thread.raise_exception()
result = True
else:
data['composite_number'] = True
result = divisors.join()
if not result:
data['timeouts'].append('divisors')
self.divisors_tab = sorted(list(set(self.divisors_tab)))
data['semiprime_factors'] = self.semi_prime_factors
data['semiprime'] = self.semiprime
data['divisors'] = self.divisors_tab
data['perfect'] = self.perfect
data['phone'] = phone.join()
data['bus'] = bus.join()
if not factors.join():
data['timeouts'].append('factors')
data['factors'] = self.factors
for thread in threading.enumerate():
time.sleep(0.001)
if thread.name != "MainThread" and "pydevd" not in thread.name:
thread.raise_exception()
return data | python | 21 | 0.5 | 124 | 26.145863 | 713 |
A class used to get information about a number.
...
Methods
--------
is_prime(n)
Checks if number is prime and returns True/False.
After 10 seconds times out and raises exception.
is_palindromic(n)
Checks if the number is palindromic (Reads the same from beginning and end).
After 5 seconds times out and raises exception.
is_square(n)
Checks if number is square (its square root is an integer).
After 5 seconds times out and raises exception.
is_triangle(n)
Checks if number is triangle.
After 5 seconds times out and raises exception.
get_factors(n)
Gets the factors of n.
get_divisors_check_semiprime_check_perfect(n)
Gets all n's divisors, checks if n is semiprime and if n is perfect.
check_primality(n)
Checks if number is prime and handles eventual exception.
get_number_systems(n)
Gets n in binary, hexadecimal, octal and decimal systems.
check_additional(n)
Gets additional information about n - is_square, is_triangle, is_palindromic, is taxicab
and handles exceptions.
compare_speed(n)
Compares n to the speed of light and sound.
check_roots(n)
Check if n is a power of any number in range <2;11>.
check_bus(n)
Checks if n is a bus number. If so, returns the list of stops of this bus (in Poland).
check_year(n)
Assumes that n is a year and checks if it's leap.
check_phone(n)
Assumes that n is a phone number and checks which country it is from.
check_number(n)
Checks if n is a positive integer.
run(n)
Runs all of the above and returns all the information as a dictionary.
| class |
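A standalone sketch of the triangle-number test that is_triangle implements above (ignoring the size cap the method applies): n is triangular exactly when x = (-1 + sqrt(1 + 8n)) / 2 is a non-negative integer, i.e. the positive root of x^2 + x - 2n = 0.
import math

def is_triangle(n: int) -> bool:
    x = (-1 + math.sqrt(1 + 8 * n)) / 2
    return x >= 0 and math.isclose(x, round(x))

print([m for m in range(1, 30) if is_triangle(m)])  # [1, 3, 6, 10, 15, 21, 28]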
class ColumnSet:
"""\
A set of columns, unique by column name.
Initialized with a list of Column objects or
(column_name: String, column_type: ColumnType) tuples.
Offers simple functionality:
* ColumnSets can be added together (order is maintained)
* Columns can be looked up by ClickHouse normalized names, e.g. 'tags.key'
* `for_schema()` can be used to generate valid ClickHouse column names
and types for a table schema.
"""
def __init__(
self, columns: Sequence[Union[Column, Tuple[str, ColumnType]]]
) -> None:
self.columns = Column.to_columns(columns)
self._lookup: MutableMapping[str, FlattenedColumn] = {}
self._flattened: List[FlattenedColumn] = []
for column in self.columns:
self._flattened.extend(column.type.flatten(column.name))
for col in self._flattened:
if col.flattened in self._lookup:
raise RuntimeError("Duplicate column: {}".format(col.flattened))
self._lookup[col.flattened] = col
# also store it by the escaped name
self._lookup[col.escaped] = col
def __repr__(self) -> str:
return "ColumnSet({})".format(repr(self.columns))
def __eq__(self, other: object) -> bool:
return (
self.__class__ == other.__class__
and self._flattened == cast(ColumnSet, other)._flattened
)
def __len__(self) -> int:
return len(self._flattened)
def __add__(
self, other: Union[ColumnSet, Sequence[Tuple[str, ColumnType]]]
) -> ColumnSet:
if isinstance(other, ColumnSet):
return ColumnSet([*self.columns, *other.columns])
return ColumnSet([*self.columns, *other])
def __contains__(self, key: str) -> bool:
return key in self._lookup
def __getitem__(self, key: str) -> FlattenedColumn:
return self._lookup[key]
def __iter__(self) -> Iterator[FlattenedColumn]:
return iter(self._flattened)
def get(
self, key: str, default: Optional[FlattenedColumn] = None
) -> Optional[FlattenedColumn]:
try:
return self[key]
except KeyError:
return default | python | 16 | 0.601524 | 80 | 33.338462 | 65 | \
A set of columns, unique by column name.
Initialized with a list of Column objects or
(column_name: String, column_type: ColumnType) tuples.
Offers simple functionality:
* ColumnSets can be added together (order is maintained)
* Columns can be looked up by ClickHouse normalized names, e.g. 'tags.key'
* `for_schema()` can be used to generate valid ClickHouse column names
and types for a table schema.
| class |
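A conceptual sketch of the flattened lookup the docstring describes, independent of the real Column/ColumnType classes: nested columns are addressed by their ClickHouse-normalized names such as "tags.key", and duplicate names are rejected.
flattened = ["event_id", "tags.key", "tags.value"]
lookup = {}
for name in flattened:
    if name in lookup:
        raise RuntimeError("Duplicate column: {}".format(name))
    lookup[name] = name
print("tags.key" in lookup)   # True: lookup by the normalized name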
class ToolTip:
'''
This class provides a flexible tooltip widget for Tkinter; it is based on IDLE's ToolTip
module which unfortunately seems to be broken (at least the version I saw).
Original author: Michael Lange <klappnase (at) freakmail (dot) de>
Modified slightly by Daniel R. Cappel, including these additions:
- 'remove' method, 'location' option, multi-monitor support, live update of textvariable, and a few other changes
The original class is no longer available online, however a simplified adaptation can be found here:
https://github.com/wikibook/python-in-practice/blob/master/TkUtil/Tooltip.py
INITIALIZATION OPTIONS:
anchor : where the text should be positioned inside the widget, must be one of "n", "s", "e", "w", "nw" and so on;
default is "center"
bd : borderwidth of the widget; default is 1 (NOTE: don't use "borderwidth" here)
bg : background color to use for the widget; default is "lightyellow" (NOTE: don't use "background")
delay : time in ms that it takes for the widget to appear on the screen when the mouse pointer has
entered the parent widget; default is 1500
fg : foreground (i.e. text) color to use; default is "black" (NOTE: don't use "foreground")
follow_mouse : if set to 1 the tooltip will follow the mouse pointer instead of being displayed
outside of the parent widget; this may be useful if you want to use tooltips for
large widgets like listboxes or canvases; default is 0
font : font to use for the widget; default is system specific
justify : how multiple lines of text will be aligned, must be "left", "right" or "center"; default is "left"
location : placement above or below the target (master) widget. values may be 'n' or 's' (default)
padx : extra space added to the left and right within the widget; default is 4
pady : extra space above and below the text; default is 2
relief : one of "flat", "ridge", "groove", "raised", "sunken" or "solid"; default is "solid"
state : must be "normal" or "disabled"; if set to "disabled" the tooltip will not appear; default is "normal"
text : the text that is displayed inside the widget
textvariable : if set to an instance of Tkinter.StringVar() the variable's value will be used as text for the widget
width : width of the widget; the default is 0, which means that "wraplength" will be used to limit the widgets width
wraplength : limits the number of characters in each line; default is 150
WIDGET METHODS:
configure(**opts) : change one or more of the widget's options as described above; the changes will take effect the
next time the tooltip shows up; NOTE: 'follow_mouse' cannot be changed after widget initialization
remove() : removes the tooltip from the parent widget
Other widget methods that might be useful if you want to subclass ToolTip:
enter() : callback when the mouse pointer enters the parent widget
leave() : called when the mouse pointer leaves the parent widget
motion() : is called when the mouse pointer moves inside the parent widget if 'follow_mouse' is set to 1 and
the tooltip has shown up to continually update the coordinates of the tooltip window
coords() : calculates the screen coordinates of the tooltip window
create_contents() : creates the contents of the tooltip window (by default a Tkinter.Label)
Ideas gleaned from PySol
Other Notes:
If text or textvariable are empty or not specified, the tooltip will not show. '''
version = 1.6
def __init__( self, master, text='Your text here', delay=1500, **opts ):
self.master = master
self._opts = {'anchor':'center', 'bd':1, 'bg':'lightyellow', 'delay':delay, 'fg':'black',
'follow_mouse':0, 'font':None, 'justify':'left', 'location':'s', 'padx':4, 'pady':2,
'relief':'solid', 'state':'normal', 'text':text, 'textvariable':None,
'width':0, 'wraplength':150}
self.configure(**opts)
self._tipwindow = None
self._id = None
self._id1 = self.master.bind("<Enter>", self.enter, '+')
self._id2 = self.master.bind("<Leave>", self.leave, '+')
self._id3 = self.master.bind("<ButtonPress>", self.leave, '+')
self._follow_mouse = 0
if self._opts['follow_mouse']:
self._id4 = self.master.bind("<Motion>", self.motion, '+')
self._follow_mouse = 1
# Monitor changes to the textvariable, if one is used (for dynamic updates to the tooltip's position)
if self._opts['textvariable']:
self._opts['textvariable'].trace( 'w', lambda nm, idx, mode: self.update() )
def configure(self, **opts):
for key in opts:
            if key in self._opts:
                self._opts[key] = opts[key]
            else:
                raise KeyError('Unknown option: "%s"' % key)
def remove(self):
#self._tipwindow.destroy()
self.leave()
self.master.unbind("<Enter>", self._id1)
self.master.unbind("<Leave>", self._id2)
self.master.unbind("<ButtonPress>", self._id3)
if self._follow_mouse:
self.master.unbind("<Motion>", self._id4)
##----these methods handle the callbacks on "<Enter>", "<Leave>" and "<Motion>"---------------##
##----events on the parent widget; override them if you want to change the widget's behavior--##
def enter(self, event=None):
self._schedule()
def leave(self, event=None):
self._unschedule()
self._hide()
def motion(self, event=None):
if self._tipwindow and self._follow_mouse:
x, y = self.coords()
self._tipwindow.wm_geometry("+%d+%d" % (x, y))
def update(self, event=None):
tw = self._tipwindow
if not tw: return
if self._opts['text'] == 'Your text here' and not self._opts['textvariable'].get():
self.leave()
else:
tw.withdraw()
tw.update_idletasks() # to make sure we get the correct geometry
x, y = self.coords()
tw.wm_geometry("+%d+%d" % (x, y))
tw.deiconify()
##------the methods that do the work:---------------------------------------------------------##
def _schedule(self):
self._unschedule()
if self._opts['state'] == 'disabled': return
self._id = self.master.after(self._opts['delay'], self._show)
def _unschedule(self):
id = self._id
self._id = None
if id:
self.master.after_cancel(id)
def _show(self):
        if self._opts['state'] == 'disabled' or \
            ( self._opts['text'] == 'Your text here' and
              not (self._opts['textvariable'] and self._opts['textvariable'].get()) ):
self._unschedule()
return
if not self._tipwindow:
self._tipwindow = tw = Tk.Toplevel(self.master)
# hide the window until we know the geometry
tw.withdraw()
tw.wm_overrideredirect(1)
if tw.tk.call("tk", "windowingsystem") == 'aqua':
tw.tk.call("::tk::unsupported::MacWindowStyle", "style", tw._w, "help", "none")
self.create_contents()
tw.update_idletasks()
x, y = self.coords()
tw.wm_geometry("+%d+%d" % (x, y))
tw.deiconify()
def _hide(self):
tw = self._tipwindow
self._tipwindow = None
if tw:
tw.destroy()
##----these methods might be overridden in derived classes:----------------------------------##
def coords(self):
# The tip window must be completely outside the master widget;
# otherwise when the mouse enters the tip window we get
# a leave event and it disappears, and then we get an enter
# event and it reappears, and so on forever :-(
# or we take care that the mouse pointer is always outside the tipwindow :-)
tw = self._tipwindow
twWidth, twHeight = tw.winfo_reqwidth(), tw.winfo_reqheight()
masterWidth, masterHeight = self.master.winfo_reqwidth(), self.master.winfo_reqheight()
if 's' in self._opts['location'] or 'e' in self._opts['location']:
cursorBuffer = 32 # Guestimate on cursor size, to ensure no overlap with it (or the master widget if follow_mouse=False)
else: cursorBuffer = 2
# if self._follow_mouse: # Tooltip needs to be well out of range of the cursor, to prevent triggering the original widget's leave event
# cursorBuffer += 32
# Establish base x/y coords
if self._follow_mouse: # Sets to cursor coords
x = self.master.winfo_pointerx()
y = self.master.winfo_pointery()
else: # Sets to widget top-left screen coords
x = self.master.winfo_rootx()
y = self.master.winfo_rooty()
# Offset the tooltip location from the master (target) widget, so that it is not over the top of it
if 'w' in self._opts['location'] or 'e' in self._opts['location']:
if self._follow_mouse:
if 'w' in self._opts['location']:
x -= ( twWidth + cursorBuffer )
else: x += cursorBuffer
# Center y coord relative to the mouse position
y -= ( twHeight / 2 - 8 )
else:
# Place the tooltip completely to the left or right of the target widget
if 'w' in self._opts['location']:
x -= ( twWidth + cursorBuffer )
else: x += masterWidth + cursorBuffer
# Vertically center tooltip relative to master widget
y += ( masterHeight / 2 - twHeight / 2 )
else: # No horizontal offset, so the tooltip must be placed above or below the target to prevent problems
if 'n' in self._opts['location']: # place the tooltip above the target
y -= ( twHeight + cursorBuffer )
else:
y += cursorBuffer
# Horizontally center tooltip relative to master widget
x += ( masterWidth / 2 - twWidth / 2 )
return x, y
def create_contents(self):
opts = self._opts.copy()
for opt in ('delay', 'follow_mouse', 'state', 'location'):
del opts[opt]
label = Tk.Label(self._tipwindow, **opts)
label.pack() | python | 16 | 0.665895 | 137 | 41.053097 | 226 |
This class provides a flexible tooltip widget for Tkinter; it is based on IDLE's ToolTip
module which unfortunately seems to be broken (at least the version I saw).
Original author: Michael Lange <klappnase (at) freakmail (dot) de>
Modified slightly by Daniel R. Cappel, including these additions:
- 'remove' method, 'location' option, multi-monitor support, live update of textvariable, and a few other changes
The original class is no longer available online, however a simplified adaptation can be found here:
https://github.com/wikibook/python-in-practice/blob/master/TkUtil/Tooltip.py
INITIALIZATION OPTIONS:
anchor : where the text should be positioned inside the widget, must be one of "n", "s", "e", "w", "nw" and so on;
default is "center"
bd : borderwidth of the widget; default is 1 (NOTE: don't use "borderwidth" here)
bg : background color to use for the widget; default is "lightyellow" (NOTE: don't use "background")
delay : time in ms that it takes for the widget to appear on the screen when the mouse pointer has
entered the parent widget; default is 1500
fg : foreground (i.e. text) color to use; default is "black" (NOTE: don't use "foreground")
follow_mouse : if set to 1 the tooltip will follow the mouse pointer instead of being displayed
outside of the parent widget; this may be useful if you want to use tooltips for
large widgets like listboxes or canvases; default is 0
font : font to use for the widget; default is system specific
justify : how multiple lines of text will be aligned, must be "left", "right" or "center"; default is "left"
location : placement above or below the target (master) widget. values may be 'n' or 's' (default)
padx : extra space added to the left and right within the widget; default is 4
pady : extra space above and below the text; default is 2
relief : one of "flat", "ridge", "groove", "raised", "sunken" or "solid"; default is "solid"
state : must be "normal" or "disabled"; if set to "disabled" the tooltip will not appear; default is "normal"
text : the text that is displayed inside the widget
textvariable : if set to an instance of Tkinter.StringVar() the variable's value will be used as text for the widget
width : width of the widget; the default is 0, which means that "wraplength" will be used to limit the widget's width
wraplength : limits the number of characters in each line; default is 150
WIDGET METHODS:
configure(**opts) : change one or more of the widget's options as described above; the changes will take effect the
next time the tooltip shows up; NOTE: 'follow_mouse' cannot be changed after widget initialization
remove() : removes the tooltip from the parent widget
Other widget methods that might be useful if you want to subclass ToolTip:
enter() : callback when the mouse pointer enters the parent widget
leave() : called when the mouse pointer leaves the parent widget
motion() : is called when the mouse pointer moves inside the parent widget if 'follow_mouse' is set to 1 and
the tooltip has shown up to continually update the coordinates of the tooltip window
coords() : calculates the screen coordinates of the tooltip window
create_contents() : creates the contents of the tooltip window (by default a Tkinter.Label)
Ideas gleaned from PySol
Other Notes:
If text or textvariable are empty or not specified, the tooltip will not show. | class |
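A minimal usage sketch for the ToolTip class described above, assuming the class is importable as ToolTip and that Tkinter is available; the button and tooltip text are purely illustrative:

import tkinter as Tk

root = Tk.Tk()
button = Tk.Button(root, text="Hover me")
button.pack(padx=20, pady=20)

# Attach a tooltip that appears 500 ms after the pointer enters the button,
# positioned below ('s') the widget.
ToolTip(button, text="This button does something useful.", delay=500, location="s")

root.mainloop()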
class MountedComponentsCollection:
"""
Manages a collection of components mounted to tree as children of a specified parent.
"""
def __init__(self, parent):
self.parent = parent
self.components = OrderedDict()
@staticmethod
def is_updatable(component, new_component):
return component.__class__ is new_component.__class__
def __iter__(self) -> Iterable[Component]:
return iter(self.components.values())
def update(self, components: list['Component']):
old_components = self.components
new_components = OrderedDict()
for new_component in components:
key = new_component.key
old_component: Component = old_components.get(key, None)
if old_component:
if self.is_updatable(old_component, new_component):
if old_component.update_props_from(new_component):
old_component.enqueue_update()
old_component.ref = new_component.ref
old_component.assign_ref()
new_components[key] = old_component
del old_components[key]
continue
else:
old_component.unmount()
del old_components[key]
new_component.mount(self.parent)
new_component.assign_ref()
new_components[key] = new_component
for old_component in old_components.values():
old_component.unmount()
self.components = new_components
def unmount(self):
for component in self.components.values():
component.unmount()
self.components = OrderedDict() | python | 16 | 0.576634 | 89 | 32.921569 | 51 |
Manages a collection of components mounted to tree as children of a specified parent.
| class |
class Interpretable:
'''Mixin for Enums, interprets value either directly
as the Enum instance or as Enum.value.
'''
@classmethod
def interpret(cls, o):
try:
if o in cls:
return o
except TypeError:
pass
return cls(o) | python | 11 | 0.546667 | 56 | 24.083333 | 12 | Mixin for Enums, interprets value either directly
as the Enum instance or as Enum.value.
| class |
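A small sketch of the Interpretable mixin combined with an Enum; the Color enum is illustrative. Note that on Python 3.12+ `value in EnumClass` also matches raw member values, so interpret() may return the raw value unchanged there:

from enum import Enum

class Color(Interpretable, Enum):
    RED = 1
    GREEN = 2

print(Color.interpret(Color.GREEN))  # already a member, returned as-is
print(Color.interpret(1))            # Color.RED on Python < 3.12 (looked up via Color(1))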
class PyTestGenInputFile:
"""An input source file to have tests generated for.
Attributes:
name (str): The filename of the source file.
path (str): The directory (relative to project dir) of the source file.
full_path (str): The full path to the input file.
"""
def __init__(self, name: str, file_path: str) -> None:
self.name = name
self.path = file_path
self.full_path = path.join(file_path, name)
def get_module(self) -> str:
return self.full_path.replace(os.sep, ".")[:-3]
def get_test_file_path(self, output_dir: str) -> str:
return path.join(output_dir, self.path,
f"test_{self.name[:-3].strip('_')}.py")
def has_test_file(self, output_dir: str) -> bool:
return path.exists(self.get_test_file_path(output_dir))
def __eq__(self, other) -> bool:
if isinstance(other, PyTestGenInputFile):
return self.name == other.name and \
self.path == other.path and \
self.full_path == other.full_path
return False
def __repr__(self) -> str:
return f"PyTestGenInputFile(\"{self.name}\", \"{self.path}\")" | python | 15 | 0.579035 | 79 | 36.59375 | 32 | An input source file to have tests generated for.
Attributes:
name (str): The filename of the source file.
path (str): The directory (relative to project dir) of the source file.
full_path (str): The full path to the input file.
| class |
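A brief sketch of how the path helpers above behave, assuming a hypothetical source file utils.py under src/pkg; the printed values assume POSIX-style separators:

from os import path

f = PyTestGenInputFile("utils.py", path.join("src", "pkg"))
print(f.get_module())                  # "src.pkg.utils"
print(f.get_test_file_path("tests"))   # "tests/src/pkg/test_utils.py"
print(f.has_test_file("tests"))        # False unless that file already exists on disk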
class PyTestGenInputSet:
"""A set of input files for generating tests from.
Attributes:
output_dir (str): The directory to output tests to.
input_files (List[PyTestGenInputFile]): The files to generate tests for.
"""
def __init__(self, output_dir: str,
input_files: List[PyTestGenInputFile]) -> None:
self.output_dir = output_dir
self.input_files = input_files
def __eq__(self, other) -> bool:
if isinstance(other, PyTestGenInputSet):
return self.input_files == other.input_files \
and self.output_dir == other.output_dir
return False
def __repr__(self) -> str:
input_files = ", ".join([f"{f.__repr__()}" for f in self.input_files])
return f"PyTestGenInputSet(\"{self.output_dir}\", [ {input_files} ])" | python | 14 | 0.597852 | 80 | 38.952381 | 21 | A set of input files for generating tests from.
Attributes:
output_dir (str): The directory to output tests to.
input_files (List[PyTestGenInputFile]): The files to generate tests for.
| class |
class Registry:
"""
Catalogue registry for types, preprocessors, logging configuration, and others
Attributes:
types:
Types for field specs, registered functions for creating ValueSupplierInterface that will supply
values for the given type
>>> @datacraft.registry.types('special_sauce')
... def _handle_special_type(field_spec: dict, loader: datacraft.Loader) -> ValueSupplierInterface:
... # return ValueSupplierInterface from spec config
schemas:
Schemas for field spec types, used to validate that the spec for a given type conforms to the schema
for it
>>> @datacraft.registry.schemas('special_sauce')
... def _special_sauce_schema() -> dict:
... # return JSON schema validating specs with type: special_sauce
preprocessors:
            Functions to modify specs before the data generation process. If there is a customization you want to do for
            every data spec, or an extension you added that requires modifications to the spec before they are run,
this is where you would register that pre-processor.
>>> @datacraft.registry.preprocessors('custom-preprocessing')
... def _preprocess_spec_to_some_end(raw_spec: dict, is_refs: bool) -> dict:
... # return spec with any modification
logging:
Custom logging setup. Can override or modify the default logging behavior.
>>> @datacraft.registry.logging('denoise')
... def _customize_logging(loglevel: str):
... logging.getLogger('too.verbose.module').level = logging.ERROR
formats:
Registered formats for output. When using the --format <format name>. Unlike other registered functions,
            this one is called directly to perform the required formatting function. The return value from the
formatter is the new value that will be written to the configured output (default is console).
>>> @datacraft.registry.formats('custom_format')
... def _format_custom(record: dict) -> str:
... # write to database or some other custom output, return something to write out or print to console
distribution:
Different numeric distributions, normal, uniform, etc. These are used for more nuanced counts values. The
built in distributions are uniform and normal.
>>> @datacraft.registry.distribution('hyperbolic_inverse_haversine')
... def _hyperbolic_inverse_haversine(mean, stddev, **kwargs):
... # return a datacraft.Distribution, args can be custom for the defined distribution
defaults:
Default values. Different types have different default values for some configs. This provides a mechanism
to override or to register other custom defaults. Read a default from the registry
with: ``datacraft.types.get_default('var_key')``. While ``datacraft.types.all_defaults()`` will give a
mapping of all registered default keys and values.
>>> @datacraft.registry.defaults('special_sauce_ingredient')
... def _default_special_sauce_ingredient():
... # return the default value (i.e. onions)
casters:
Cast or alter values in simple ways. These are all the valid forms of altering generated values after they
are created outside of the ValueSupplier types. Use ``datacraft.types.registered_casters()`` to get a list
of all the currently registered ones.
>>> @datacraft.registry.casters('reverse')
... def _cast_reverse_strings():
... # return a datacraft.CasterInterface
"""
types = catalogue.create('datacraft', 'type')
schemas = catalogue.create('datacraft', 'schemas')
preprocessors = catalogue.create('datacraft', 'preprocessor')
logging = catalogue.create('datacraft', 'logging')
formats = catalogue.create('datacraft', 'format')
distribution = catalogue.create('datacraft', 'distribution')
defaults = catalogue.create('datacraft', 'defaults')
casters = catalogue.create('datacraft', 'casters') | python | 8 | 0.649616 | 118 | 52.111111 | 81 |
Catalogue registry for types, preprocessors, logging configuration, and others
Attributes:
types:
Types for field specs, registered functions for creating ValueSupplierInterface that will supply
values for the given type
>>> @datacraft.registry.types('special_sauce')
... def _handle_special_type(field_spec: dict, loader: datacraft.Loader) -> ValueSupplierInterface:
... # return ValueSupplierInterface from spec config
schemas:
Schemas for field spec types, used to validate that the spec for a given type conforms to the schema
for it
>>> @datacraft.registry.schemas('special_sauce')
... def _special_sauce_schema() -> dict:
... # return JSON schema validating specs with type: special_sauce
preprocessors:
            Functions to modify specs before the data generation process. If there is a customization you want to do for
            every data spec, or an extension you added that requires modifications to the spec before they are run,
this is where you would register that pre-processor.
>>> @datacraft.registry.preprocessors('custom-preprocessing')
... def _preprocess_spec_to_some_end(raw_spec: dict, is_refs: bool) -> dict:
... # return spec with any modification
logging:
Custom logging setup. Can override or modify the default logging behavior.
>>> @datacraft.registry.logging('denoise')
... def _customize_logging(loglevel: str):
... logging.getLogger('too.verbose.module').level = logging.ERROR
formats:
Registered formats for output. When using the --format <format name>. Unlike other registered functions,
this one is called directly for to perform the required formatting function. The return value from the
formatter is the new value that will be written to the configured output (default is console).
>>> @datacraft.registry.formats('custom_format')
... def _format_custom(record: dict) -> str:
... # write to database or some other custom output, return something to write out or print to console
distribution:
Different numeric distributions, normal, uniform, etc. These are used for more nuanced counts values. The
built in distributions are uniform and normal.
>>> @datacraft.registry.distribution('hyperbolic_inverse_haversine')
... def _hyperbolic_inverse_haversine(mean, stddev, **kwargs):
... # return a datacraft.Distribution, args can be custom for the defined distribution
defaults:
Default values. Different types have different default values for some configs. This provides a mechanism
to override or to register other custom defaults. Read a default from the registry
with: ``datacraft.types.get_default('var_key')``. While ``datacraft.types.all_defaults()`` will give a
mapping of all registered default keys and values.
>>> @datacraft.registry.defaults('special_sauce_ingredient')
... def _default_special_sauce_ingredient():
... # return the default value (i.e. onions)
casters:
Cast or alter values in simple ways. These are all the valid forms of altering generated values after they
are created outside of the ValueSupplier types. Use ``datacraft.types.registered_casters()`` to get a list
of all the currently registered ones.
>>> @datacraft.registry.casters('reverse')
... def _cast_reverse_strings():
... # return a datacraft.CasterInterface
| class |
class ZipFileManager:
"""Reader and Writer of Zip files.
Parameters
----------
path
Path to a zip archive to be read or written to.
source
Either a string/byte representing zipped data, or a file-like object
connected to zipped data.
replace
In case the provided ``path`` is pointing to an already-existing file,
should it be erased or appended to ?
"""
# The Zipfile manager manages at the same time files already in the zip
# archive when it was created, and files left uncompressed in memory.
# The uncompressed files in memory are flushed into the archive upon
# closing of the manager, with the ``.close`` method.
def __init__(self, path=None, source=None, replace=False):
self.path = "." if path is None else path
if path == "@memory": # VIRTUAL ZIP FROM SCRATCH
self.source = StringBytesIO()
self.writer = zipfile.ZipFile(
self.source, "a", compression=zipfile.ZIP_DEFLATED
)
self.reader = zipfile.ZipFile(StringBytesIO(EMPTY_ZIP_BYTES), "r")
elif path is not None: # ON DISK ZIP
self.source = path
if replace or not os.path.exists(path):
with open(self.source, "wb") as f:
f.write(EMPTY_ZIP_BYTES)
self.writer = zipfile.ZipFile(
self.source, "a", compression=zipfile.ZIP_DEFLATED
)
self.reader = zipfile.ZipFile(self.source, "r")
else: # VIRTUAL ZIP FROM EXISTING DATA
self.source = source
if isinstance(self.source, (str, bytes)):
self.source = StringBytesIO(source)
self.writer = zipfile.ZipFile(
self.source, "a", compression=zipfile.ZIP_DEFLATED
)
self.reader = zipfile.ZipFile(self.source, "r")
self.files_data = defaultdict(lambda *a: StringBytesIO())
def relative_path(self, target):
path = target._path[len(self.path) + 1 :]
if target._is_dir and path != "":
path += "/"
return path
def list_directory_components(self, directory, regexpr):
path = self.relative_path(directory)
matches = [
re.match(regexpr % re.escape(path), name) for name in self.reader.namelist()
]
return sorted(
set(
[
match.groups()[0]
for match in matches
if match is not None and (match.groups()[0] != "")
]
)
)
def list_files(self, directory):
return self.list_directory_components(directory, regexpr=r"%s([^/]*)$")
def list_dirs(self, directory):
return self.list_directory_components(directory, regexpr=r"%s([^/]*)/")
def read(self, fileobject, mode="r"):
path = self.relative_path(fileobject).strip("/")
if path in self.files_data:
result = self.files_data[path].getvalue()
else:
result = self.reader.read(path)
if (mode == "r") and hasattr(result, "decode"):
result = result.decode("utf8")
return result
def write(self, fileobject, content, mode="w"):
path = self.relative_path(fileobject)
if self.path_exists_in_file(fileobject):
raise NotImplementedError(
"Rewriting a file already zipped is not currently supported. "
"It may actually not even be possible, or in an inelegant way."
)
if mode in ("w", "wb"): # i.e. not append
self.files_data.pop(path, None) # overwrite if exists!
if not isinstance(content, bytes):
content = content.encode("utf-8")
self.files_data[path].write(content)
def delete(self, directory):
raise NotImplementedError(
"Deleting/modifying/overwriting an already-zipped file "
"is not currently supported. "
"It may actually not even be possible, or in a very inelegant way."
)
def create(self, directory, replace=False):
if self.path_exists_in_file(directory) and replace:
self.delete(directory)
# TODO: I don't know how to create an empty dir in a zip but right
# now it doesn't really matter because the directories are created
        # the moment we create a file whose path is inside this directory.
def path_exists_in_file(self, directory):
return self.relative_path(directory) in self.reader.namelist()
@staticmethod
def join_paths(*paths):
return "/".join(*paths)
def close(self):
for path, data in self.files_data.items():
self.writer.writestr(path, data.getvalue())
self.writer.close()
if hasattr(self.source, "getvalue"):
return self.source.getvalue()
def open(self, fileobject, mode="a"):
path = self.relative_path(fileobject)
if mode in ("r", "rb"):
container = {"r": StringIO, "rb": BytesIO}[mode]
if path in self.files_data:
content = self.files_data[path].getvalue()
if mode == "r":
content = content.decode()
return container(content)
else:
return container(self.read(fileobject, mode=mode))
else:
if mode == "w" and path not in self.files_data:
self.files_data[path] = StringIO()
elif mode == "wb" and path not in self.files_data:
self.files_data[path] = BytesIO()
return self.files_data[path] | python | 18 | 0.573973 | 88 | 37.849315 | 146 | Reader and Writer of Zip files.
Parameters
----------
path
Path to a zip archive to be read or written to.
source
Either a string/byte representing zipped data, or a file-like object
connected to zipped data.
replace
In case the provided ``path`` is pointing to an already-existing file,
should it be erased or appended to ?
| class |
class Daubechies7:
"""
Properties
----------
asymmetric, orthogonal, bi-orthogonal
All values are from http://wavelets.pybytes.com/wavelet/db7/
"""
__name__ = "Daubechies Wavelet 7"
__motherWaveletLength__ = 14 # length of the mother wavelet
__transformWaveletLength__ = 2 # minimum wavelength of input signal
# decomposition filter
# low-pass
decompositionLowFilter = [
0.0003537138000010399,
- 0.0018016407039998328,
0.00042957797300470274,
0.012550998556013784,
- 0.01657454163101562,
- 0.03802993693503463,
0.0806126091510659,
0.07130921926705004,
- 0.22403618499416572,
- 0.14390600392910627,
0.4697822874053586,
0.7291320908465551,
0.39653931948230575,
0.07785205408506236,
]
# high-pass
decompositionHighFilter = [
-0.07785205408506236,
0.39653931948230575,
- 0.7291320908465551,
0.4697822874053586,
0.14390600392910627,
- 0.22403618499416572,
- 0.07130921926705004,
0.0806126091510659,
0.03802993693503463,
- 0.01657454163101562,
- 0.012550998556013784,
0.00042957797300470274,
0.0018016407039998328,
0.0003537138000010399
]
# reconstruction filters
# low pass
reconstructionLowFilter = [
0.07785205408506236,
0.39653931948230575,
0.7291320908465551,
0.4697822874053586,
- 0.14390600392910627,
- 0.22403618499416572,
0.07130921926705004,
0.0806126091510659,
- 0.03802993693503463,
- 0.01657454163101562,
0.012550998556013784,
0.00042957797300470274,
- 0.0018016407039998328,
0.0003537138000010399
]
# high-pass
reconstructionHighFilter = [
0.0003537138000010399,
0.0018016407039998328,
0.00042957797300470274,
- 0.012550998556013784,
- 0.01657454163101562,
0.03802993693503463,
0.0806126091510659,
- 0.07130921926705004,
- 0.22403618499416572,
0.14390600392910627,
0.4697822874053586,
- 0.7291320908465551,
0.39653931948230575,
- 0.07785205408506236
] | python | 7 | 0.61405 | 72 | 26.141176 | 85 |
Properties
----------
asymmetric, orthogonal, bi-orthogonal
All values are from http://wavelets.pybytes.com/wavelet/db7/
| class |
class logical_expression:
"""A logical statement/sentence/expression class"""
# All types need to be mutable, so we don't have to pass in the whole class.
# We can just pass, for example, the symbol variable to a function, and the
# function's changes will actually alter the class variable. Thus, lists.
def __init__(self):
self.symbol = ['']
self.connective = ['']
self.subexpressions = [] | python | 9 | 0.657471 | 80 | 47.444444 | 9 | A logical statement/sentence/expression class | class |
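A short sketch of building the sentence (A and B) from the bare containers above; the single-element lists mirror the class's mutable-by-reference design:

a = logical_expression()
a.symbol[0] = 'A'
b = logical_expression()
b.symbol[0] = 'B'
conjunction = logical_expression()
conjunction.connective[0] = 'and'
conjunction.subexpressions = [a, b]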
class TaskBase:
"""The base class of all Tasks.
Contains shared logic that drives all of its sub-classes. Should never be
instantiated on its own.
"""
def __init__(self, id_: Union[int, str], actions: Union[List[Action], Action]):
"""Initialize the TaskBase.
Parameters
----------
id_ : int
An ID for the task
actions : List[Any]
The list of DF actions representing this Task.
Needed for reconstruction in the extension.
"""
self.id: Union[int, str] = id_
self.state = TaskState.RUNNING
self.parent: Optional[CompoundTask] = None
self._api_name: str
api_action: Union[Action, Type[CompoundAction]]
if isinstance(actions, list):
if len(actions) == 1:
api_action = actions[0]
else:
api_action = CompoundAction
else:
api_action = actions
self._api_name = api_action.__class__.__name__
self.result: Any = None
self.action_repr: Union[List[Action], Action] = actions
self.is_played = False
@property
def is_completed(self) -> bool:
"""Get indicator of whether the task completed.
Note that completion is not equivalent to success.
"""
return not(self.state is TaskState.RUNNING)
def set_is_played(self, is_played: bool):
"""Set the is_played flag for the Task.
Needed for updating the orchestrator's is_replaying flag.
Parameters
----------
is_played : bool
Whether the latest event for this Task has been played before.
"""
self.is_played = is_played
def change_state(self, state: TaskState):
"""Transition a running Task to a terminal state: success or failure.
Parameters
----------
state : TaskState
The terminal state to assign to this Task
Raises
------
Exception
When the input state is RUNNING
"""
if state is TaskState.RUNNING:
raise Exception("Cannot change Task to the RUNNING state.")
self.state = state
def set_value(self, is_error: bool, value: Any):
"""Set the value of this Task: either an exception of a result.
Parameters
----------
is_error : bool
Whether the value represents an exception of a result.
value : Any
The value of this Task
Raises
------
Exception
When the Task failed but its value was not an Exception
"""
new_state = self.state
if is_error:
if not isinstance(value, Exception):
if not (isinstance(value, TaskBase) and isinstance(value.result, Exception)):
                    err_message = f"Task ID {self.id} failed but its value was not an Exception"
raise Exception(err_message)
new_state = TaskState.FAILED
else:
new_state = TaskState.SUCCEEDED
self.change_state(new_state)
self.result = value
self.propagate()
def propagate(self):
"""Notify parent Task of this Task's state change."""
has_completed = not (self.state is TaskState.RUNNING)
has_parent = not (self.parent is None)
if has_completed and has_parent:
self.parent.handle_completion(self) | python | 16 | 0.567233 | 97 | 30.87156 | 109 | The base class of all Tasks.
Contains shared logic that drives all of its sub-classes. Should never be
instantiated on its own.
| class |
class Balance:
"""A balance model class"""
def __init__(self, amount: float, currency: str) -> None:
"""Initialize a new Balance object
:param amount: Balance amount
:type amount: float
:param currency: currency code
:type currency: str
"""
self._amount = amount
self._currency = currency
@property
def amount(self) -> float:
"""Get the balance amount
:return: amount
:rtype: float
"""
return self._amount
@property
def currency(self) -> str:
"""Get the currency code
:return: currency code
:rtype: str
"""
return self._currency | python | 8 | 0.541311 | 61 | 21.677419 | 31 | A balance model class | class |
class BalanceService:
"""A service for interacting with the balance endpoint"""
def __init__(self, http_client) -> None:
self._http_client = http_client
def get(self) -> Balance:
"""Get the client's current balance
:return: Balance object containing the amount and currency.
:rtype: Balance
"""
balance = self._http_client.get(BALANCE_ENDPOINT).json()
return Balance(balance["amount"], balance["currency"]) | python | 12 | 0.631027 | 67 | 33.142857 | 14 | A service for interacting with the balance endpoint | class |
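A hedged sketch of BalanceService wired to a stubbed HTTP client; the real client and BALANCE_ENDPOINT come from the surrounding package, and the stub below only mimics the .get(...).json() shape the service relies on:

class _StubResponse:
    def json(self):
        return {"amount": 125.5, "currency": "GBP"}

class _StubHttpClient:
    def get(self, endpoint):
        return _StubResponse()

service = BalanceService(_StubHttpClient())
balance = service.get()
print(balance.amount, balance.currency)   # 125.5 GBP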
class VariableInfo:
"""
Class that represents the declaration and usage of a variable.
"""
def __init__(self, is_arg: bool, typed: str):
self.is_arg = is_arg
self.mutable = False
self.mutable_ref = False
self.typed = typed | python | 8 | 0.592593 | 66 | 29.111111 | 9 |
Class that represents the declaration and usage of a variable.
| class |
class ShiftVisitor:
"""
Visitor to get time or number shift
Attributes:
time_shift (int): get the max time shift
num_shift (int): get the max num shift
"""
def __init__(self):
"""
Constructor
"""
self.time_shift = 0
self.num_shift = 0
def visit_TimeFilter(self, time_filter):
"""
        Visit a time filter and track the maximum time shift
Args:
time_filter (TimeFilter): time filter
Returns:
None
"""
self.time_shift = max(self.time_shift, time_filter.calculate())
def visit_NumFilter(self, num_filter):
"""
        Visit a numeric filter and track the maximum number shift
        Args:
            num_filter (NumFilter): num filter
Returns:
None
"""
self.num_shift = max(self.num_shift, num_filter.calculate()) | python | 11 | 0.517533 | 71 | 21.378378 | 37 |
Visitor to get time or number shift
Attributes:
time_shift (int): get the max time shift
num_shift (int): get the max num shift
| class |
class Card:
"""
This class will hold all card information:
Instance Variables:
name, atk, shield, heal, extra_move, draw_card, special
"""
def __init__(self, name, atk, shield, heal, extra_move, draw_card, special):
self.name = name
self.atk = atk
self.shield = shield
self.heal = heal
self.extra_move = extra_move
self.draw_card = draw_card
self.special = special
def attack(self, Opponent):
if Opponent.shield:
if(Opponent.shield[0].shield <= self.atk):
print(f"{Opponent.shield[0].name} has been destroyed!")
Opponent.discard.append(Opponent.shield.popleft())
else:
Opponent.shield[0].shield -= self.atk
print(f"{Opponent.shield[0].name} has {Opponent.shield[0].shield} shield power left.")
else:
if(Opponent.cur_hp < self.atk):
Opponent.cur_hp = 0
print(f"{Opponent.name}'s health has dropped to {Opponent.cur_hp}. {Opponent.name} is defeated.")
else:
Opponent.cur_hp -= self.atk
print(f"{Opponent.name}'s health has dropped to {Opponent.cur_hp}")
def cast_heal(self, Player):
if Player.cur_hp + self.heal > Player.max_hp:
Player.cur_hp = Player.max_hp
print(f"{Player.name} hp is healed to max.\n{Player.name} hp is now {Player.cur_hp}.")
else:
Player.cur_hp += self.heal
print(f"{Player.name} healed {self.heal} hp.\n{Player.name} hp is now {Player.cur_hp}.")
def add_shield(self, Player):
Player.shield.append(self)
def gain_extra_move(self, Player):
pass
def draw_extra_cards(self, Player):
for _ in range(self.draw_card):
Player.draw_card()
def cast_special(self, *args):
pass
def __rmul__(self, other):
#todo: Implement this overload in the future. Return X copies of the Card Class
pass
def __del__(self):
#print(f"{self.name} is DELETED!")
pass
def __str__(self):
return f"\n{self.name}:\nATK: {str(self.atk)}\nShield: {str(self.shield)}\nHeal: {str(self.heal)}\nExtra Move: {str(self.extra_move)}\
\nDraw Card: {str(self.draw_card)}\nSpecial Move: {str(self.special)}\n"
def __repr__(self):
return f"\nCard({self.name},{str(self.atk)},{str(self.shield)},{str(self.heal)},{str(self.extra_move)},{str(self.draw_card)},{str(self.special)})\n" | python | 18 | 0.562092 | 156 | 38.424242 | 66 |
This class will hold all card information:
Instance Variables:
name, atk, shield, heal, extra_move, draw_card, special
| class |
class PreparedTestRequest:
"A test request meant for consumption by a test harness."
def __init__(self, raw_harness, raw_submission, raw_assignment,
testables_directory, harness_directory, suite_specific = {}):
self.raw_harness = raw_harness
self.raw_submission = raw_submission
self.raw_assignment = raw_assignment
self.testables_directory = testables_directory
self.harness_directory = harness_directory
self.suite_specific = suite_specific
self.actions = []
def to_dict(self):
result = {
"raw_harness": self.raw_harness,
"raw_submission": self.raw_submission,
"raw_assignment": self.raw_assignment,
"testables_directory": self.testables_directory,
"harness_directory": self.harness_directory,
"actions": self.actions
}
assert all("/" in i for i in self.suite_specific)
result.update(self.suite_specific)
return result
def update_actions(self, request_type = None):
if request_type is None:
request_type = self.raw_submission["test_type"]
if not request_type:
request_type = "public"
if request_type == "public":
grab_types = ("public", )
elif request_type == "final":
grab_types = ("public", "final")
else:
raise ValueError("Expected final or public, got %s." % request_type)
self.actions = []
for i in grab_types:
self.actions += \
self.raw_harness["config"].get("galah/actions", {}).get(i, []) | python | 15 | 0.692253 | 71 | 28.680851 | 47 | A test request meant for consumption by a test harness. | class |
class GHPullRequestWorker:
"""
Worker that collects Pull Request related data from the Github API and stores it in our database.
:param task: most recent task the broker added to the worker's queue
:param config: holds info like api keys, descriptions, and database connection strings
"""
def __init__(self, config, task=None):
self._task = task
self._child = None
self._queue = Queue()
self._maintain_queue = Queue()
self.working_on = None
self.config = config
LOG_FORMAT = '%(levelname)s:[%(name)s]: %(message)s'
logging.basicConfig(filename='worker_{}.log'.format(self.config['id'].split('.')[len(self.config['id'].split('.')) - 1]), filemode='w', level=logging.INFO, format=LOG_FORMAT)
logging.info('Worker (PID: {}) initializing...\n'.format(str(os.getpid())))
self.db = None
self.table = None
self.API_KEY = self.config['key']
self.tool_source = 'GitHub Pull Request Worker'
self.tool_version = '0.0.1' # See __init__.py
self.data_source = 'GitHub API'
self.results_counter = 0
self.headers = {'Authorization': f'token {self.API_KEY}'}
self.history_id = None
self.finishing_task = True
self.specs = {
"id": self.config['id'],
"location": self.config['location'],
"qualifications": [
{
"given": [['github_url']],
"models":['pull_requests', 'pull_request_commits', 'pull_request_files']
}
],
"config": [self.config]
}
self.DB_STR = 'postgresql://{}:{}@{}:{}/{}'.format(
self.config['user'], self.config['password'], self.config['host'],
self.config['port'], self.config['database']
)
#Database connections
logging.info("Making database connections...\n")
dbschema = 'augur_data'
self.db = s.create_engine(self.DB_STR, poolclass = s.pool.NullPool,
connect_args={'options': '-csearch_path={}'.format(dbschema)})
helper_schema = 'augur_operations'
self.helper_db = s.create_engine(self.DB_STR, poolclass = s.pool.NullPool,
connect_args={'options': '-csearch_path={}'.format(helper_schema)})
metadata = MetaData()
helper_metadata = MetaData()
metadata.reflect(self.db, only=['contributors', 'pull_requests',
'pull_request_assignees', 'pull_request_events', 'pull_request_labels',
'pull_request_message_ref', 'pull_request_meta', 'pull_request_repo',
'pull_request_reviewers', 'pull_request_teams', 'message', 'pull_request_commits',
'pull_request_files'])
helper_metadata.reflect(self.helper_db, only=['worker_history', 'worker_job', 'worker_oauth'])
Base = automap_base(metadata=metadata)
HelperBase = automap_base(metadata=helper_metadata)
Base.prepare()
HelperBase.prepare()
self.contributors_table = Base.classes.contributors.__table__
self.pull_requests_table = Base.classes.pull_requests.__table__
self.pull_request_assignees_table = Base.classes.pull_request_assignees.__table__
self.pull_request_events_table = Base.classes.pull_request_events.__table__
self.pull_request_labels_table = Base.classes.pull_request_labels.__table__
self.pull_request_message_ref_table = Base.classes.pull_request_message_ref.__table__
self.pull_request_meta_table = Base.classes.pull_request_meta.__table__
self.pull_request_repo_table = Base.classes.pull_request_repo.__table__
self.pull_request_reviewers_table = Base.classes.pull_request_reviewers.__table__
self.pull_request_teams_table = Base.classes.pull_request_teams.__table__
self.message_table = Base.classes.message.__table__
self.pull_request_commits_table = Base.classes.pull_request_commits.__table__
self.pull_request_files_table = Base.classes.pull_request_files.__table__
self.history_table = HelperBase.classes.worker_history.__table__
self.job_table = HelperBase.classes.worker_job.__table__
logging.info("Querying starting ids info...\n")
# Increment so we are ready to insert the 'next one' of each of these most recent ids
self.history_id = get_max_id(self, 'worker_history', 'history_id', operations_table=True) + 1
self.pr_id_inc = get_max_id(self, 'pull_requests', 'pull_request_id')
self.cntrb_id_inc = get_max_id(self, 'contributors', 'cntrb_id')
self.msg_id_inc = get_max_id(self, 'message', 'msg_id')
self.pr_msg_ref_id_inc = get_max_id(self, 'pull_request_message_ref', 'pr_msg_ref_id')
self.label_id_inc = get_max_id(self, 'pull_request_labels', 'pr_label_id')
self.event_id_inc = get_max_id(self, 'pull_request_events', 'pr_event_id')
self.reviewer_id_inc = get_max_id(self, 'pull_request_reviewers', 'pr_reviewer_map_id')
self.assignee_id_inc = get_max_id(self, 'pull_request_assignees', 'pr_assignee_map_id')
self.pr_meta_id_inc = get_max_id(self, 'pull_request_meta', 'pr_repo_meta_id')
# Organize different api keys/oauths available
init_oauths(self)
# Send broker hello message
connect_to_broker(self)
# self.pull_requests_graphql({
# 'job_type': 'MAINTAIN',
# 'models': ['pull_request_files'],
# 'display_name': 'pull_request_files model for url: https://github.com/zephyrproject-rtos/actions_sandbox.git',
# 'given': {
# 'github_url': 'https://github.com/zephyrproject-rtos/actions_sandbox.git'
# }
# }, 25201)
def update_config(self, config):
""" Method to update config and set a default
"""
self.config = {
"display_name": "",
"description": "",
"required": 1,
"type": "string"
}
self.config.update(config)
self.API_KEY = self.config['key']
@property
def task(self):
""" Property that is returned when the worker's current task is referenced
"""
return self._task
@task.setter
def task(self, value):
""" entry point for the broker to add a task to the queue
Adds this task to the queue, and calls method to process queue
"""
github_url = value['given']['github_url']
repo_url_SQL = s.sql.text("""
SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}'
""".format(github_url))
rs = pd.read_sql(repo_url_SQL, self.db, params={})
try:
repo_id = int(rs.iloc[0]['repo_id'])
if value['job_type'] == "UPDATE" or value['job_type'] == "MAINTAIN":
self._queue.put(value)
if 'focused_task' in value:
if value['focused_task'] == 1:
self.finishing_task = True
except Exception as e:
logging.error(f"error: {e}, or that repo is not in our database: {value}\n")
self._task = value
self.run()
def cancel(self):
""" Delete/cancel current task
"""
self._task = None
def run(self):
""" Kicks off the processing of the queue if it is not already being processed
Gets run whenever a new task is added
"""
logging.info("Running...\n")
self._child = Process(target=self.collect, args=())
self._child.start()
def collect(self):
""" Function to process each entry in the worker's task queue
Determines what action to take based off the message type
"""
while True:
if not self._queue.empty():
message = self._queue.get()
self.working_on = message['job_type']
else:
break
logging.info("Popped off message: {}\n".format(str(message)))
if message['job_type'] == 'STOP':
break
if message['job_type'] != 'MAINTAIN' and message['job_type'] != 'UPDATE':
raise ValueError('{} is not a recognized task type'.format(message['job_type']))
# Query all repos with repo url of given task
repoUrlSQL = s.sql.text("""
SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}'
""".format(message['given']['github_url']))
repo_id = int(pd.read_sql(repoUrlSQL, self.db, params={}).iloc[0]['repo_id'])
try:
if message['models'][0] == 'pull_requests':
self.pull_requests_model(message, repo_id)
elif message['models'][0] == 'pull_request_commits':
self.pull_request_commits_model(message, repo_id)
elif message['models'][0] == 'pull_request_files':
self.pull_requests_graphql(message, repo_id)
except Exception as e:
register_task_failure(self, message, repo_id, e)
pass
def graphql_paginate(self, query, data_subjects, before_parameters=None):
""" Paginate a GitHub GraphQL query backwards
:param query: A string, holds the GraphQL query
        :rtype: A list of dicts, the edges collected across all pages
"""
logging.info(f'Start paginate with params: \n{data_subjects} '
f'\n{before_parameters}')
def all_items(dictionary):
for key, value in dictionary.items():
if type(value) is dict:
yield (key, value)
yield from all_items(value)
else:
yield (key, value)
if not before_parameters:
before_parameters = {}
for subject, _ in all_items(data_subjects):
before_parameters[subject] = ''
start_cursor = None
has_previous_page = True
base_url = 'https://api.github.com/graphql'
tuples = []
def find_root_of_subject(data, key_subject):
key_nest = None
for subject, nest in data.items():
if key_subject in nest:
key_nest = nest[key_subject]
break
elif type(nest) == dict:
return find_root_of_subject(nest, key_subject)
else:
raise KeyError
return key_nest
for data_subject, nest in data_subjects.items():
logging.info(f'Beginning paginate process for field {data_subject} '
f'for query: {query}')
page_count = 0
while has_previous_page:
page_count += 1
num_attempts = 3
success = False
for attempt in range(num_attempts):
logging.info(f'Attempt #{attempt + 1} for hitting GraphQL endpoint '
f'page number {page_count}\n')
response = requests.post(base_url, json={'query': query.format(
**before_parameters)}, headers=self.headers)
update_gh_rate_limit(self, response)
try:
data = response.json()
except:
data = json.loads(json.dumps(response.text))
if 'errors' in data:
logging.info("Error!: {}".format(data['errors']))
if data['errors'][0]['type'] == 'RATE_LIMITED':
update_gh_rate_limit(self, response)
num_attempts -= 1
continue
if 'data' in data:
success = True
root = find_root_of_subject(data, data_subject)
page_info = root['pageInfo']
data = root['edges']
break
else:
logging.info("Request returned a non-data dict: {}\n".format(data))
if data['message'] == 'Not Found':
logging.info("Github repo was not found or does not exist for endpoint: {}\n".format(base_url))
break
if data['message'] == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.':
num_attempts -= 1
update_gh_rate_limit(self, response, temporarily_disable=True)
if data['message'] == 'Bad credentials':
update_gh_rate_limit(self, response, bad_credentials=True)
if not success:
logging.info('GraphQL query failed: {}'.format(query))
continue
before_parameters.update({
data_subject: ', before: \"{}\"'.format(page_info['startCursor'])
})
has_previous_page = page_info['hasPreviousPage']
tuples += data
logging.info(f'Paged through {page_count} pages and '
f'collected {len(tuples)} data points\n')
if not nest:
return tuples
return tuples + self.graphql_paginate(query, data_subjects[subject],
before_parameters=before_parameters)
def pull_requests_graphql(self, task_info, repo_id):
owner, repo = get_owner_repo(task_info['given']['github_url'])
# query existing PRs and the respective url we will append the commits url to
pr_number_sql = s.sql.text("""
SELECT DISTINCT pr_src_number as pr_src_number, pull_requests.pull_request_id
FROM pull_requests--, pull_request_meta
WHERE repo_id = {}
""".format(repo_id))
pr_numbers = pd.read_sql(pr_number_sql, self.db, params={})
pr_file_rows = []
for index, pull_request in enumerate(pr_numbers.itertuples()):
logging.info(f'Querying files for pull request #{index + 1} of {len(pr_numbers)}')
query = """
{{
repository(owner:"%s", name:"%s"){{
pullRequest (number: %s) {{
""" % (owner, repo, pull_request.pr_src_number) + """
files (last: 100{files}) {{
pageInfo {{
hasPreviousPage
hasNextPage
endCursor
startCursor
}}
edges {{
node {{
additions
deletions
path
}}
}}
}}
}}
}}
}}
"""
pr_file_rows += [{
'pull_request_id': pull_request.pull_request_id,
                'pr_file_additions': pr_file['node']['additions'],
'pr_file_deletions': pr_file['node']['deletions'],
'pr_file_path': pr_file['node']['path'],
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': 'GitHub API',
} for pr_file in self.graphql_paginate(query, {'files': None})]
# Get current table values
table_values_sql = s.sql.text("""
SELECT pull_request_files.*
FROM pull_request_files, pull_requests
WHERE pull_request_files.pull_request_id = pull_requests.pull_request_id
AND repo_id = :repo_id
""")
logging.info(f'Getting table values with the following PSQL query: \n{table_values_sql}\n')
table_values = pd.read_sql(table_values_sql, self.db, params={'repo_id': repo_id})
# Compare queried values against table values for dupes/updates
if len(pr_file_rows) > 0:
table_columns = pr_file_rows[0].keys()
        else:
            logging.info(f'No rows need insertion for repo {repo_id}\n')
            register_task_completion(self, task_info, repo_id, 'pull_request_files')
            return
# Compare queried values against table values for dupes/updates
pr_file_rows_df = pd.DataFrame(pr_file_rows)
pr_file_rows_df = pr_file_rows_df.dropna(subset=['pull_request_id'])
pr_file_rows_df['need_update'] = 0
dupe_columns = ['pull_request_id', 'pr_file_path']
update_columns = ['pr_file_additions', 'pr_file_deletions']
logging.info(f'{pr_file_rows_df}')
logging.info(f'{table_values}')
need_insertion = pr_file_rows_df.merge(table_values, suffixes=('','_table'),
how='outer', indicator=True, on=dupe_columns).loc[
lambda x : x['_merge']=='left_only'][table_columns]
need_updates = pr_file_rows_df.merge(table_values, on=dupe_columns, suffixes=('','_table'),
how='inner',indicator=False)[table_columns].merge(table_values,
on=update_columns, suffixes=('','_table'), how='outer',indicator=True
).loc[lambda x : x['_merge']=='left_only'][table_columns]
need_updates['b_pull_request_id'] = need_updates['pull_request_id']
need_updates['b_pr_file_path'] = need_updates['pr_file_path']
pr_file_insert_rows = need_insertion.to_dict('records')
pr_file_update_rows = need_updates.to_dict('records')
logging.info(f'Repo id {repo_id} needs {len(need_insertion)} insertions and '
f'{len(need_updates)} updates.\n')
if len(pr_file_update_rows) > 0:
success = False
while not success:
try:
self.db.execute(
self.pull_request_files_table.update().where(
self.pull_request_files_table.c.pull_request_id == bindparam('b_pull_request_id') and
self.pull_request_files_table.c.pr_file_path == bindparam('b_pr_file_path')).values(
pr_file_additions=bindparam('pr_file_additions'),
pr_file_deletions=bindparam('pr_file_deletions')),
pr_file_update_rows
)
success = True
except Exception as e:
logging.info('error: {}'.format(e))
time.sleep(5)
if len(pr_file_insert_rows) > 0:
success = False
while not success:
try:
self.db.execute(
self.pull_request_files_table.insert(),
pr_file_insert_rows
)
success = True
except Exception as e:
logging.info('error: {}'.format(e))
time.sleep(5)
register_task_completion(self, task_info, repo_id, 'pull_request_files')
def pull_request_commits_model(self, task_info, repo_id):
""" Queries the commits related to each pull request already inserted in the db """
# query existing PRs and the respective url we will append the commits url to
pr_url_sql = s.sql.text("""
SELECT DISTINCT pr_url, pull_requests.pull_request_id
FROM pull_requests--, pull_request_meta
WHERE repo_id = {}
""".format(repo_id))
urls = pd.read_sql(pr_url_sql, self.db, params={})
for pull_request in urls.itertuples(): # for each url of PRs we have inserted
commits_url = pull_request.pr_url + '/commits?page={}'
table = 'pull_request_commits'
table_pkey = 'pr_cmt_id'
duplicate_col_map = {'pr_cmt_sha': 'sha'}
update_col_map = {}
# Use helper paginate function to iterate the commits url and check for dupes
pr_commits = paginate(self, commits_url, duplicate_col_map, update_col_map, table, table_pkey,
where_clause="where pull_request_id = {}".format(pull_request.pull_request_id))
for pr_commit in pr_commits: # post-pagination, iterate results
if pr_commit['flag'] == 'need_insertion': # if non-dupe
pr_commit_row = {
'pull_request_id': pull_request.pull_request_id,
'pr_cmt_sha': pr_commit['sha'],
'pr_cmt_node_id': pr_commit['node_id'],
'pr_cmt_message': pr_commit['commit']['message'],
# 'pr_cmt_comments_url': pr_commit['comments_url'],
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': 'GitHub API',
}
result = self.db.execute(self.pull_request_commits_table.insert().values(pr_commit_row))
logging.info(f"Inserted Pull Request Commit: {result.inserted_primary_key}\n")
register_task_completion(self, task_info, repo_id, 'pull_request_commits')
def pull_requests_model(self, entry_info, repo_id):
"""Pull Request data collection function. Query GitHub API for PhubRs.
:param entry_info: A dictionary consisiting of 'git_url' and 'repo_id'
:type entry_info: dict
"""
github_url = entry_info['given']['github_url']
logging.info('Beginning collection of Pull Requests...\n')
logging.info(f'Repo ID: {repo_id}, Git URL: {github_url}\n')
record_model_process(self, repo_id, 'pull_requests')
owner, repo = self.get_owner_repo(github_url)
url = (f'https://api.github.com/repos/{owner}/{repo}/pulls?state=all&' +
'direction=asc&per_page=100&page={}')
# Get pull requests that we already have stored
# Set pseudo key (something other than PK) to
# check dupicates with
table = 'pull_requests'
table_pkey = 'pull_request_id'
update_col_map = {'pr_src_state': 'state'}
duplicate_col_map = {'pr_src_id': 'id'}
#list to hold pull requests needing insertion
prs = paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey,
where_clause='WHERE repo_id = {}'.format(repo_id),
value_update_col_map={'pr_augur_contributor_id': float('nan')})
# Discover and remove duplicates before we start inserting
logging.info("Count of pull requests needing update or insertion: " + str(len(prs)) + "\n")
for pr_dict in prs:
pr = {
'repo_id': repo_id,
'pr_url': pr_dict['url'],
'pr_src_id': pr_dict['id'],
'pr_src_node_id': None,
'pr_html_url': pr_dict['html_url'],
'pr_diff_url': pr_dict['diff_url'],
'pr_patch_url': pr_dict['patch_url'],
'pr_issue_url': pr_dict['issue_url'],
'pr_augur_issue_id': None,
'pr_src_number': pr_dict['number'],
'pr_src_state': pr_dict['state'],
'pr_src_locked': pr_dict['locked'],
'pr_src_title': pr_dict['title'],
'pr_augur_contributor_id': find_id_from_login(self, pr_dict['user']['login']),
'pr_body': pr_dict['body'],
'pr_created_at': pr_dict['created_at'],
'pr_updated_at': pr_dict['updated_at'],
'pr_closed_at': pr_dict['closed_at'],
'pr_merged_at': pr_dict['merged_at'],
'pr_merge_commit_sha': pr_dict['merge_commit_sha'],
'pr_teams': None,
'pr_milestone': pr_dict['milestone']['title'] if pr_dict['milestone'] else None,
'pr_commits_url': pr_dict['commits_url'],
'pr_review_comments_url': pr_dict['review_comments_url'],
'pr_review_comment_url': pr_dict['review_comment_url'],
'pr_comments_url': pr_dict['comments_url'],
'pr_statuses_url': pr_dict['statuses_url'],
'pr_meta_head_id': None,
'pr_meta_base_id': None,
'pr_src_issue_url': pr_dict['issue_url'],
'pr_src_comments_url': pr_dict['comments_url'], # NOTE: this seems redundant
'pr_src_review_comments_url': pr_dict['review_comments_url'], # this too
'pr_src_commits_url': pr_dict['commits_url'], # this one also seems redundant
'pr_src_statuses_url': pr_dict['statuses_url'],
'pr_src_author_association': pr_dict['author_association'],
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': 'GitHub API'
}
if pr_dict['flag'] == 'need_insertion':
logging.info(f'PR {pr_dict["id"]} needs to be inserted\n')
result = self.db.execute(self.pull_requests_table.insert().values(pr))
logging.info(f"Added Pull Request: {result.inserted_primary_key}")
self.pr_id_inc = int(result.inserted_primary_key[0])
elif pr_dict['flag'] == 'need_update':
result = self.db.execute(self.pull_requests_table.update().where(
self.pull_requests_table.c.pr_src_id==pr_dict['id']).values(pr))
logging.info("Updated tuple in the pull_requests table with existing pr_src_id: {}".format(
pr_dict['id']))
self.pr_id_inc = pr_dict['pkey']
else:
logging.info("PR does not need to be inserted. Fetching its id from DB")
pr_id_sql = s.sql.text("""
SELECT pull_request_id FROM pull_requests
WHERE pr_src_id={}
""".format(pr_dict['id']))
self.pr_id_inc = int(pd.read_sql(pr_id_sql, self.db).iloc[0]['pull_request_id'])
self.query_labels(pr_dict['labels'], self.pr_id_inc)
self.query_pr_events(owner, repo, pr_dict['number'], self.pr_id_inc)
self.query_pr_comments(owner, repo, pr_dict['number'], self.pr_id_inc)
self.query_reviewers(pr_dict['requested_reviewers'], self.pr_id_inc)
self.query_pr_meta(pr_dict['head'], pr_dict['base'], self.pr_id_inc)
logging.info(f"Inserted PR data for {owner}/{repo}")
self.results_counter += 1
register_task_completion(self, entry_info, repo_id, 'pull_requests')
def query_labels(self, labels, pr_id):
logging.info('Querying PR Labels\n')
if len(labels) == 0:
logging.info('No new labels to add\n')
return
table = 'pull_request_labels'
duplicate_col_map = {'pr_src_id': 'id'}
update_col_map = {}
table_pkey = 'pr_label_id'
update_keys = list(update_col_map.keys()) if update_col_map else []
cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey]
pr_labels_table_values = get_table_values(self, cols_query, [table])
new_labels = assign_tuple_action(self, labels, pr_labels_table_values, update_col_map, duplicate_col_map,
table_pkey)
logging.info(f'Found {len(new_labels)} labels\n')
for label_dict in new_labels:
label = {
'pull_request_id': pr_id,
'pr_src_id': label_dict['id'],
'pr_src_node_id': label_dict['node_id'],
'pr_src_url': label_dict['url'],
'pr_src_description': label_dict['name'],
'pr_src_color': label_dict['color'],
'pr_src_default_bool': label_dict['default'],
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
}
if label_dict['flag'] == 'need_insertion':
result = self.db.execute(self.pull_request_labels_table.insert().values(label))
logging.info(f"Added PR Label: {result.inserted_primary_key}\n")
logging.info(f"Inserted PR Labels data for PR with id {pr_id}\n")
self.results_counter += 1
self.label_id_inc = int(result.inserted_primary_key[0])
def query_pr_events(self, owner, repo, gh_pr_no, pr_id):
logging.info('Querying PR Events\n')
url = (f'https://api.github.com/repos/{owner}/{repo}/issues/{gh_pr_no}' +
'/events?per_page=100&page={}')
# Get pull request events that we already have stored
# Set our duplicate and update column map keys (something other than PK) to
# check dupicates/needed column updates with
table = 'pull_request_events'
table_pkey = 'pr_event_id'
update_col_map = {}
duplicate_col_map = {'issue_event_src_id': 'id'}
# List of pull request events needing insertion or update
pr_events = paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey)
logging.info("Count of pull request events needing insertion: " + str(len(pr_events)) + "\n")
for pr_event_dict in pr_events:
if pr_event_dict['actor']:
cntrb_id = find_id_from_login(self, pr_event_dict['actor']['login'])
else:
cntrb_id = 1
pr_event = {
'pull_request_id': pr_id,
'cntrb_id': cntrb_id,
'action': pr_event_dict['event'],
'action_commit_hash': None,
'created_at': pr_event_dict['created_at'],
'issue_event_src_id': pr_event_dict['id'],
'node_id': pr_event_dict['node_id'],
'node_url': pr_event_dict['url'],
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
}
result = self.db.execute(self.pull_request_events_table.insert().values(pr_event))
logging.info(f"Added PR Event: {result.inserted_primary_key}\n")
self.results_counter += 1
self.event_id_inc = int(result.inserted_primary_key[0])
logging.info(f"Inserted PR Events data for PR with id {pr_id}\n")
def query_reviewers(self, reviewers, pr_id):
logging.info('Querying Reviewers')
if reviewers is None or len(reviewers) == 0:
logging.info('No reviewers to add')
return
table = 'pull_request_reviewers'
duplicate_col_map = {'pr_reviewer_map_id': 'id'}
update_col_map = {}
table_pkey = 'pr_reviewer_map_id'
update_keys = list(update_col_map.keys()) if update_col_map else []
cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey]
reviewers_table_values = get_table_values(self, cols_query, [table])
new_reviewers = assign_tuple_action(self, reviewers, reviewers_table_values, update_col_map, duplicate_col_map,
table_pkey)
for reviewers_dict in new_reviewers:
if 'login' in reviewers_dict:
cntrb_id = find_id_from_login(self, reviewers_dict['login'])
else:
cntrb_id = 1
reviewer = {
'pull_request_id': pr_id,
'cntrb_id': cntrb_id,
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
}
if reviewers_dict['flag'] == 'need_insertion':
result = self.db.execute(self.pull_request_reviewers_table.insert().values(reviewer))
logging.info(f"Added PR Reviewer {result.inserted_primary_key}")
self.reviewer_id_inc = int(result.inserted_primary_key[0])
self.results_counter += 1
logging.info(f"Finished inserting PR Reviewer data for PR with id {pr_id}")
def query_assignee(self, assignees, pr_id):
logging.info('Querying Assignees')
if assignees is None or len(assignees) == 0:
logging.info('No assignees to add')
return
table = 'pull_request_assignees'
duplicate_col_map = {'pr_assignee_map_id': 'id'}
update_col_map = {}
table_pkey = 'pr_assignee_map_id'
update_keys = list(update_col_map.keys()) if update_col_map else []
cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey]
assignee_table_values = get_table_values(self, cols_query, [table])
assignees = assign_tuple_action(self, assignees, assignee_table_values, update_col_map, duplicate_col_map,
table_pkey)
for assignee_dict in assignees:
if 'login' in assignee_dict:
cntrb_id = find_id_from_login(self, assignee_dict['login'])
else:
cntrb_id = 1
assignee = {
'pull_request_id': pr_id,
'contrib_id': cntrb_id,
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
}
if assignee_dict['flag'] == 'need_insertion':
result = self.db.execute(self.pull_request_assignees_table.insert().values(assignee))
logging.info(f'Added PR Assignee {result.inserted_primary_key}')
self.assignee_id_inc = int(result.inserted_primary_key[0])
self.results_counter += 1
logging.info(f'Finished inserting PR Assignee data for PR with id {pr_id}')
def query_pr_meta(self, head, base, pr_id):
logging.info('Querying PR Meta')
table = 'pull_request_meta'
duplicate_col_map = {'pr_sha': 'sha'}
update_col_map = {}
value_update_col_map = {'pr_src_meta_label': None}
table_pkey = 'pr_repo_meta_id'
update_keys = list(update_col_map.keys()) if update_col_map else []
update_keys += list(value_update_col_map.keys())
cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey]
meta_table_values = get_table_values(self, cols_query, [table])
pr_meta_dict = {
'head': assign_tuple_action(self, [head], meta_table_values, update_col_map, duplicate_col_map,
table_pkey, value_update_col_map=value_update_col_map)[0],
'base': assign_tuple_action(self, [base], meta_table_values, update_col_map, duplicate_col_map,
table_pkey, value_update_col_map=value_update_col_map)[0]
}
for pr_side, pr_meta_data in pr_meta_dict.items():
pr_meta = {
'pull_request_id': pr_id,
'pr_head_or_base': pr_side,
'pr_src_meta_label': pr_meta_data['label'],
'pr_src_meta_ref': pr_meta_data['ref'],
'pr_sha': pr_meta_data['sha'],
'cntrb_id': find_id_from_login(self, pr_meta_data['user']['login']) if pr_meta_data['user'] \
and 'login' in pr_meta_data['user'] else None,
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
}
if pr_meta_data['flag'] == 'need_update':
result = self.db.execute(self.pull_request_meta_table.update().where(
self.pull_request_meta_table.c.pr_sha==pr_meta['pr_sha'] and
self.pull_request_meta_table.c.pr_head_or_base==pr_side
).values(pr_meta))
logging.info("Updated tuple in the issues table with existing gh_issue_id: {}".format(
issue_dict['id']))
self.issue_id_inc = issue_dict['pkey']
elif pr_meta_data['flag'] == 'need_insertion':
result = self.db.execute(self.pull_request_meta_table.insert().values(pr_meta))
logging.info(f'Added PR Head {result.inserted_primary_key}')
self.pr_meta_id_inc = int(result.inserted_primary_key[0])
self.results_counter += 1
else:
pr_meta_id_sql = """
SELECT pr_repo_meta_id FROM pull_request_meta
WHERE pr_sha='{}'
""".format(pr_meta_data['sha'])
self.pr_meta_id_inc = int(pd.read_sql(pr_meta_id_sql, self.db).iloc[0]['pr_repo_meta_id'])
if pr_meta_data['repo']:
self.query_pr_repo(pr_meta_data['repo'], pr_side, self.pr_meta_id_inc)
else:
logging.info('No new PR Head data to add')
logging.info(f'Finished inserting PR Head & Base data for PR with id {pr_id}')
def query_pr_comments(self, owner, repo, gh_pr_no, pr_id):
logging.info('Querying PR Comments')
url = (f'https://api.github.com/repos/{owner}/{repo}/issues/{gh_pr_no}' +
'/comments?per_page=100&page={}')
# Get pull request comments that we already have stored
# Set our duplicate and update column map keys (something other than PK) to
# check duplicates/needed column updates with
table = 'pull_request_message_ref'
table_pkey = 'pr_msg_ref_id'
update_col_map = {}
duplicate_col_map = {'pr_message_ref_src_comment_id': 'id'}
# List of pull request comments needing insertion or update
pr_messages = paginate(self, url, duplicate_col_map, update_col_map, table, table_pkey)
logging.info("Count of pull request comments needing insertion: " + str(len(pr_messages)) + "\n")
for pr_msg_dict in pr_messages:
if pr_msg_dict['user'] and 'login' in pr_msg_dict['user']:
cntrb_id = find_id_from_login(self, pr_msg_dict['user']['login'])
else:
cntrb_id = 1
msg = {
'rgls_id': None,
'msg_text': pr_msg_dict['body'],
'msg_timestamp': pr_msg_dict['created_at'],
'msg_sender_email': None,
'msg_header': None,
'pltfrm_id': '25150',
'cntrb_id': cntrb_id,
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
}
result = self.db.execute(self.message_table.insert().values(msg))
logging.info(f'Added PR Comment {result.inserted_primary_key}')
self.msg_id_inc = int(result.inserted_primary_key[0])
pr_msg_ref = {
'pull_request_id': pr_id,
'msg_id': self.msg_id_inc,
'pr_message_ref_src_comment_id': pr_msg_dict['id'],
'pr_message_ref_src_node_id': pr_msg_dict['node_id'],
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
}
result = self.db.execute(
self.pull_request_message_ref_table.insert().values(pr_msg_ref)
)
logging.info(f'Added PR Message Ref {result.inserted_primary_key}')
self.pr_msg_ref_id_inc = int(result.inserted_primary_key[0])
self.results_counter += 1
logging.info(f'Finished adding PR Message data for PR with id {pr_id}')
def query_pr_repo(self, pr_repo, pr_repo_type, pr_meta_id):
logging.info(f'Querying PR {pr_repo_type} repo')
table = 'pull_request_repo'
duplicate_col_map = {'pr_src_repo_id': 'id'}
update_col_map = {}
table_pkey = 'pr_repo_id'
update_keys = list(update_col_map.keys()) if update_col_map else []
cols_query = list(duplicate_col_map.keys()) + update_keys + [table_pkey]
pr_repo_table_values = get_table_values(self, cols_query, [table])
new_pr_repo = assign_tuple_action(self, [pr_repo], pr_repo_table_values, update_col_map, duplicate_col_map,
table_pkey)[0]
if new_pr_repo['owner'] and 'login' in new_pr_repo['owner']:
cntrb_id = find_id_from_login(self, new_pr_repo['owner']['login'])
else:
cntrb_id = 1
pr_repo = {
'pr_repo_meta_id': pr_meta_id,
'pr_repo_head_or_base': pr_repo_type,
'pr_src_repo_id': new_pr_repo['id'],
# 'pr_src_node_id': new_pr_repo[0]['node_id'],
'pr_src_node_id': None,
'pr_repo_name': new_pr_repo['name'],
'pr_repo_full_name': new_pr_repo['full_name'],
'pr_repo_private_bool': new_pr_repo['private'],
'pr_cntrb_id': cntrb_id,
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
}
if new_pr_repo['flag'] == 'need_insertion':
result = self.db.execute(self.pull_request_repo_table.insert().values(pr_repo))
logging.info(f'Added PR {pr_repo_type} repo {result.inserted_primary_key}')
self.results_counter += 1
logging.info(f'Finished adding PR {pr_repo_type} Repo data for PR with id {self.pr_id_inc}')
def get_owner_repo(self, github_url):
split = github_url.split('/')
owner = split[-2]
repo = split[-1]
if '.git' in repo:
repo = repo[:-4]
return owner, repo | python | 23 | 0.538334 | 182 | 42.438596 | 969 |
Worker that collects Pull Request related data from the Github API and stores it in our database.
:param task: most recent task the broker added to the worker's queue
:param config: holds info like api keys, descriptions, and database connection strings
| class |
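The worker methods above all follow the same insert-or-update pattern driven by `paginate`, `get_table_values` and `assign_tuple_action`, which are defined elsewhere in the worker's codebase. Below is a minimal standalone sketch of how a duplicate-column map can flag incoming tuples; it is an illustration only, not the actual Augur helpers.

def flag_new_tuples(api_tuples, table_values, duplicate_col_map, update_col_map):
    """Mark each API tuple as needing insertion, an update, or nothing."""
    for tup in api_tuples:
        # Look for an existing row whose duplicate columns match this tuple
        match = next(
            (row for row in table_values
             if all(row[db_col] == tup[src_col]
                    for db_col, src_col in duplicate_col_map.items())),
            None)
        if match is None:
            tup['flag'] = 'need_insertion'
        elif any(match[db_col] != tup[src_col]
                 for db_col, src_col in update_col_map.items()):
            tup['flag'] = 'need_update'
        else:
            tup['flag'] = 'none'
    return api_tuples

# Example: one label already stored, one new label coming from the API
existing = [{'pr_src_id': 101, 'pr_src_description': 'bug'}]
incoming = [{'id': 101, 'name': 'bug'}, {'id': 202, 'name': 'enhancement'}]
print([t['flag'] for t in flag_new_tuples(incoming, existing, {'pr_src_id': 'id'}, {})])
# -> ['none', 'need_insertion']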
class Indenter:
"""Qutepart functionality, related to indentation
Public attributes:
width Indent width
useTabs Indent uses Tabs (instead of spaces)
"""
_DEFAULT_INDENT_WIDTH = 4
_DEFAULT_INDENT_USE_TABS = False
def __init__(self, qpart):
self._qpart = qpart
self.width = self._DEFAULT_INDENT_WIDTH
self.useTabs = self._DEFAULT_INDENT_USE_TABS
self._smartIndenter = _getSmartIndenter('normal', self._qpart, self)
def setSyntax(self, syntax):
"""Choose smart indentation algorithm according to syntax"""
self._smartIndenter = self._chooseSmartIndenter(syntax)
def text(self):
"""Get indent text as \t or string of spaces
"""
if self.useTabs:
return '\t'
else:
return ' ' * self.width
def triggerCharacters(self):
"""Trigger characters for smart indentation"""
return self._smartIndenter.TRIGGER_CHARACTERS
def autoIndentBlock(self, block, char='\n'):
"""Indent block after Enter pressed or trigger character typed
"""
currentText = block.text()
spaceAtStartLen = len(currentText) - len(currentText.lstrip())
currentIndent = currentText[:spaceAtStartLen]
indent = self._smartIndenter.computeIndent(block, char)
if indent is not None and indent != currentIndent:
self._qpart.replaceText(block.position(), spaceAtStartLen, indent)
def onChangeSelectedBlocksIndent(self, increase, withSpace=False):
"""Tab or Space pressed and few blocks are selected, or Shift+Tab pressed
Insert or remove text from the beginning of blocks
"""
def blockIndentation(block):
text = block.text()
return text[:len(text) - len(text.lstrip())]
def cursorAtSpaceEnd(block):
cursor = QTextCursor(block)
cursor.setPosition(block.position() + len(blockIndentation(block)))
return cursor
def indentBlock(block):
cursor = cursorAtSpaceEnd(block)
cursor.insertText(' ' if withSpace else self.text())
def spacesCount(text):
return len(text) - len(text.rstrip(' '))
def unIndentBlock(block):
currentIndent = blockIndentation(block)
if currentIndent.endswith('\t'):
charsToRemove = 1
elif withSpace:
charsToRemove = 1 if currentIndent else 0
else:
if self.useTabs:
charsToRemove = min(spacesCount(currentIndent), self.width)
else: # spaces
if currentIndent.endswith(self.text()): # remove indent level
charsToRemove = self.width
else: # remove all spaces
charsToRemove = min(spacesCount(currentIndent), self.width)
if charsToRemove:
cursor = cursorAtSpaceEnd(block)
cursor.setPosition(cursor.position() - charsToRemove, QTextCursor.KeepAnchor)
cursor.removeSelectedText()
cursor = self._qpart.textCursor()
startBlock = self._qpart.document().findBlock(cursor.selectionStart())
endBlock = self._qpart.document().findBlock(cursor.selectionEnd())
if(cursor.selectionStart() != cursor.selectionEnd() and
endBlock.position() == cursor.selectionEnd() and
endBlock.previous().isValid()):
endBlock = endBlock.previous() # when indenting multiple lines, do not indent the last line if it is not actually selected
indentFunc = indentBlock if increase else unIndentBlock
if startBlock != endBlock: # indent multiple lines
stopBlock = endBlock.next()
block = startBlock
with self._qpart:
while block != stopBlock:
indentFunc(block)
block = block.next()
newCursor = QTextCursor(startBlock)
newCursor.setPosition(endBlock.position() + len(endBlock.text()), QTextCursor.KeepAnchor)
self._qpart.setTextCursor(newCursor)
else: # indent 1 line
indentFunc(startBlock)
def onShortcutIndentAfterCursor(self):
"""Tab pressed and no selection. Insert text after cursor
"""
cursor = self._qpart.textCursor()
def insertIndent():
if self.useTabs:
cursor.insertText('\t')
else: # indent to integer count of indents from line start
charsToInsert = self.width - (len(self._qpart.textBeforeCursor()) % self.width)
cursor.insertText(' ' * charsToInsert)
if cursor.positionInBlock() == 0: # if there is no indent yet - indent smartly
block = cursor.block()
self.autoIndentBlock(block, '')
# if no smart indentation - just insert one indent
if self._qpart.textBeforeCursor() == '':
insertIndent()
else:
insertIndent()
def onShortcutUnindentWithBackspace(self):
"""Backspace pressed, unindent
"""
assert self._qpart.textBeforeCursor().endswith(self.text())
charsToRemove = len(self._qpart.textBeforeCursor()) % len(self.text())
if charsToRemove == 0:
charsToRemove = len(self.text())
cursor = self._qpart.textCursor()
cursor.setPosition(cursor.position() - charsToRemove, QTextCursor.KeepAnchor)
cursor.removeSelectedText()
def onAutoIndentTriggered(self):
"""Indent current line or selected lines
"""
cursor = self._qpart.textCursor()
startBlock = self._qpart.document().findBlock(cursor.selectionStart())
endBlock = self._qpart.document().findBlock(cursor.selectionEnd())
if startBlock != endBlock: # indent multiple lines
stopBlock = endBlock.next()
block = startBlock
with self._qpart:
while block != stopBlock:
self.autoIndentBlock(block, '')
block = block.next()
else: # indent 1 line
self.autoIndentBlock(startBlock, '')
def _chooseSmartIndenter(self, syntax):
"""Get indenter for syntax
"""
if syntax.indenter is not None:
try:
return _getSmartIndenter(syntax.indenter, self._qpart, self)
except KeyError:
logger.error("Indenter '%s' is not finished yet. But you can do it!" % syntax.indenter)
try:
return _getSmartIndenter(syntax.name, self._qpart, self)
except KeyError:
pass
return _getSmartIndenter('normal', self._qpart, self) | python | 22 | 0.59346 | 105 | 36.103825 | 183 | Qutepart functionality, related to indentation
Public attributes:
width Indent width
useTabs Indent uses Tabs (instead of spaces)
| class |
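A quick illustration of the tab-stop arithmetic used by `onShortcutIndentAfterCursor` above: when indenting with spaces, Tab fills up to the next multiple of the indent width. This is a standalone sketch; the real method also consults the smart indenter first.

width = 4  # Indenter._DEFAULT_INDENT_WIDTH
for text_before_cursor in ("", "x", "xyz", "abcd"):
    chars_to_insert = width - (len(text_before_cursor) % width)
    print(repr(text_before_cursor), "->", chars_to_insert, "space(s) inserted")
# '' -> 4, 'x' -> 3, 'xyz' -> 1, 'abcd' -> 4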
class Coordinates:
"""Data class for passing around lat and lng"""
lat: float
lon: float | python | 6 | 0.653465 | 51 | 19.4 | 5 | Data class for passing around lat and lng | class |
class ClimateMetadata:
"""Data class for passing around climate metadata"""
relative_change_precip: float
monthly_average_precip: float | python | 6 | 0.743243 | 56 | 28.8 | 5 | Data class for passing around climate metadata | class |
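Coordinates and ClimateMetadata above are plain annotated classes; the `@dataclass` decorator they presumably carry falls outside these snippets. A hedged usage sketch, assuming that decorator is applied in the original source:

from dataclasses import dataclass

@dataclass
class Coordinates:
    """Data class for passing around lat and lng"""
    lat: float
    lon: float

point = Coordinates(lat=51.5074, lon=-0.1278)
print(point)  # Coordinates(lat=51.5074, lon=-0.1278)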
class Module:
"""Represents a module, which may contain several commands
"""
def __init__(self) -> None:
self.commands = {
"fact": self.showSnippet, "facc": self.showSnippet, "topic": self.showSnippet, "quote": self.showSnippet,
"addfact": self.manageSnippet, "addtopic": self.manageSnippet, "addquote": self.manageSnippet,
"deletefact": self.manageSnippet, "removefact": self.manageSnippet,
"deletetopic": self.manageSnippet, "removetopic": self.manageSnippet,
"deletequote": self.manageSnippet, "removequote": self.manageSnippet,
"countfacts": self.countSnippets, "factcount": self.countSnippets,
"counttopics": self.countSnippets, "topiccount": self.countSnippets,
"countquotes": self.countSnippets, "quotecount": self.countSnippets,
"factlist": self.exportSnippets, "listfacts": self.exportSnippets,
"topiclist": self.exportSnippets, "listtopics": self.exportSnippets,
"quotelist": self.exportSnippets, "listquotes": self.exportSnippets
}
self.factList = data.get("factList")
self.topicList = data.get("topicList")
self.quoteList = data.get("quoteList")
def showSnippet(self, message: core.BotMessage) -> None:
"""Shows a fact, quote, or topic in chat
Arguments:
message {Message} -- the Message object that invoked the command
"""
kind = 'facts'
snippetList = self.factList
if "topic" in message.arguments[0]:
kind = 'topics'
snippetList = self.topicList
elif "quote" in message.arguments[0]:
kind = 'quotes'
snippetList = self.quoteList
if message.room:
roomid = message.room.id
if roomid == 'trivia' and kind == 'quotes':
return message.respond('This command is disabled in the Trivia room.')
elif len(message.arguments) > 1:
roomid = psclient.toID(message.arguments[1])
else:
return message.respond("You must specify a room.")
if not snippetList or roomid not in snippetList.keys():
return message.respond(f"There are no {kind} for this room.")
return message.respond(random.choice(snippetList[roomid]))
def manageSnippet(self, message: core.BotMessage) -> None:
"""Removes or adds a fact, topic, or quote
Arguments:
message {Message} -- the Message object that invoked the command
"""
if message.room and len(message.arguments) > 1:
room = message.room
snippet = ",".join(message.arguments[1:]).strip()
elif len(message.arguments) > 2:
room = message.connection.getRoom(message.arguments[1])
snippet = ",".join(message.arguments[2:]).strip()
else:
return message.respond("You must specify a fact/topic/quote (and a room if used in PMs).")
if not message.sender.can("addfact", room): return message.respond("Permission denied.")
if not re.match(r'[a-zA-Z0-9]', snippet): snippet = " " + snippet
kind = 'Fact'
snippetList = self.factList
if "topic" in message.arguments[0]:
kind = 'Topic'
snippetList = self.topicList
elif "quote" in message.arguments[0]:
kind = 'Quote'
snippetList = self.quoteList
isAddition = "add" in message.arguments[0]
if not snippetList: snippetList = {room.id: []}
if room.id not in snippetList.keys():
snippetList[room.id] = []
if snippet not in snippetList[room.id] and isAddition:
snippetList[room.id].append(snippet)
message.respond(translations.translate(room, f"{kind} was successfully added!"))
elif snippet in snippetList[room.id] and not isAddition:
snippetList[room.id].remove(snippet)
message.respond(translations.translate(room, f"{kind} was successfully removed!"))
else:
return message.respond(f"That {kind} is {'already' if isAddition else 'not'} in the room's list!")
if kind == 'Topic':
self.topicList = snippetList
return data.store("topicList", self.topicList)
if kind == 'Quote':
self.quoteList = snippetList
return data.store("quoteList", self.quoteList)
self.factList = snippetList
return data.store("factList", self.factList)
def countSnippets(self, message: core.BotMessage) -> None:
"""Counts the number of snippets
Arguments:
message {Message} -- the Message object that invoked the command
"""
kind = 'fact'
snippetList = self.factList
if "topic" in message.arguments[0]:
kind = 'topic'
snippetList = self.topicList
elif "quote" in message.arguments[0]:
kind = 'quote'
snippetList = self.quoteList
if message.room:
room = message.room
elif len(message.arguments) > 1:
room = message.connection.getRoom(message.arguments[1])
else:
return message.respond("You must specify a room.")
num = 0
if snippetList and room.id in snippetList.keys(): num = len(snippetList[room.id])
return message.respond(f"There {'is ' if num == 1 else 'are '} {str(num)} \
{kind}{'' if num == 1 else 's'} for the room {room.id}.")
def exportSnippets(self, message: core.BotMessage) -> None:
"""Exports the snippets to Pastebin
Arguments:
message {Message} -- the Message object that invoked the command
"""
kind = 'fact'
snippetList = self.factList
if "topic" in message.arguments[0]:
kind = 'topic'
snippetList = self.topicList
elif "quote" in message.arguments[0]:
kind = 'quote'
snippetList = self.quoteList
if message.room:
room = message.room
elif len(message.arguments) > 1:
room = message.connection.getRoom(message.arguments[1])
else:
return message.respond("You must specify a room.")
if not message.sender.can("addfact", room): return message.respond("Permission denied.")
if room.id not in snippetList.keys() or len(snippetList[room.id]) == 0:
return message.respond(f"There are no {kind}s for the room {room.id}.")
pasteData = "\n".join(snippetList[room.id])
return message.respond(str(Pastebin(config.pastebinAPIKey).create_paste(
pasteData, # the data
1, # unlisted paste
f"{kind.title()}s for room {room.id}" # title
)))
def __str__(self) -> str:
"""String representation of the Module
Returns:
string -- representation
"""
return f"Conversation module: displays snippets of text in chat. Commands: {', '.join(self.commands.keys())}" | python | 16 | 0.599067 | 117 | 41.63253 | 166 | Represents a module, which may contain several commands
| class |
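The Module above stores every snippet list as a plain dict of room id -> list of strings, persisted through the bot's `data` store. A minimal standalone sketch of the add/remove bookkeeping follows; the names here are illustrative, not the bot's API.

def toggle_snippet(snippet_list, room_id, snippet, add):
    """Add or remove one snippet for a room; returns what happened."""
    room_snippets = snippet_list.setdefault(room_id, [])
    if add and snippet not in room_snippets:
        room_snippets.append(snippet)
        return "added"
    if not add and snippet in room_snippets:
        room_snippets.remove(snippet)
        return "removed"
    return "already present" if add else "not present"

facts = {}
print(toggle_snippet(facts, "lobby", "Honey never spoils.", add=True))   # added
print(toggle_snippet(facts, "lobby", "Honey never spoils.", add=True))   # already present
print(toggle_snippet(facts, "lobby", "Honey never spoils.", add=False))  # removed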
class Thread:
"""A builder for a profile of a single thread.
Attributes:
comm: Thread command-line (name).
pid: process ID of containing process.
tid: thread ID.
samples: Timeline of profile samples.
frameTable: interned stack frame ID -> stack frame.
stringTable: interned string ID -> string.
stringMap: interned string -> string ID.
stackTable: interned stack ID -> stack.
stackMap: (stack prefix ID, leaf stack frame ID) -> interned Stack ID.
frameMap: Stack Frame string -> interned Frame ID.
"""
comm: str
pid: int
tid: int
samples: List[Sample] = field(default_factory=list)
frameTable: List[Frame] = field(default_factory=list)
stringTable: List[str] = field(default_factory=list)
# TODO: this is redundant with frameTable, could we remove this?
stringMap: Dict[str, int] = field(default_factory=dict)
stackTable: List[Stack] = field(default_factory=list)
stackMap: Dict[Tuple[Optional[int], int], int] = field(default_factory=dict)
frameMap: Dict[str, int] = field(default_factory=dict)
def _intern_stack(self, frame_id: int, prefix_id: Optional[int]) -> int:
"""Gets a matching stack, or saves the new stack. Returns a Stack ID."""
key = (prefix_id, frame_id)
stack_id = self.stackMap.get(key)
if stack_id is not None:
return stack_id
stack_id = len(self.stackTable)
self.stackTable.append(Stack(prefix_id=prefix_id,
frame_id=frame_id,
category_id=0))
self.stackMap[key] = stack_id
return stack_id
def _intern_string(self, string: str) -> int:
"""Gets a matching string, or saves the new string. Returns a String ID."""
string_id = self.stringMap.get(string)
if string_id is not None:
return string_id
string_id = len(self.stringTable)
self.stringTable.append(string)
self.stringMap[string] = string_id
return string_id
def _intern_frame(self, frame_str: str) -> int:
"""Gets a matching stack frame, or saves the new frame. Returns a Frame ID."""
frame_id = self.frameMap.get(frame_str)
if frame_id is not None:
return frame_id
frame_id = len(self.frameTable)
self.frameMap[frame_str] = frame_id
string_id = self._intern_string(frame_str)
category = 0
# Heuristic: kernel code contains "kallsyms" as the library name.
if "kallsyms" in frame_str or ".ko" in frame_str:
category = 1
elif ".so" in frame_str:
category = 2
elif ".vdex" in frame_str:
category = 3
elif ".oat" in frame_str:
category = 4
self.frameTable.append(Frame(
string_id=string_id,
relevantForJS=False,
innerWindowID=0,
implementation=None,
optimizations=None,
line=None,
column=None,
category=category,
subcategory=0,
))
return frame_id
def _add_sample(self, comm: str, stack: List[str], time_ms: Milliseconds) -> None:
"""Add a timestamped stack trace sample to the thread builder.
Args:
comm: command-line (name) of the thread at this sample
stack: sampled stack frames. Root first, leaf last.
time_ms: timestamp of sample in milliseconds
"""
# Unix threads often don't set their name immediately upon creation.
# Use the most recently seen name for the thread
if self.comm != comm:
self.comm = comm
prefix_stack_id = None
for frame in stack:
frame_id = self._intern_frame(frame)
prefix_stack_id = self._intern_stack(frame_id, prefix_stack_id)
self.samples.append(Sample(stack_id=prefix_stack_id,
time_ms=time_ms,
responsiveness=0))
def _to_json_dict(self) -> Dict:
"""Converts this Thread to GeckoThread JSON format."""
# The samples aren't guaranteed to be in order. Sort them by time.
self.samples.sort(key=lambda s: s.time_ms)
# Gecko profile format is row-oriented data as List[List],
# And a schema for interpreting each index.
# Schema:
# https://github.com/firefox-devtools/profiler/blob/main/docs-developer/gecko-profile-format.md
# https://github.com/firefox-devtools/profiler/blob/53970305b51b9b472e26d7457fee1d66cd4e2737/src/types/gecko-profile.js#L230
return {
"tid": self.tid,
"pid": self.pid,
"name": self.comm,
# https://github.com/firefox-devtools/profiler/blob/53970305b51b9b472e26d7457fee1d66cd4e2737/src/types/gecko-profile.js#L51
"markers": {
"schema": {
"name": 0,
"startTime": 1,
"endTime": 2,
"phase": 3,
"category": 4,
"data": 5,
},
"data": [],
},
# https://github.com/firefox-devtools/profiler/blob/53970305b51b9b472e26d7457fee1d66cd4e2737/src/types/gecko-profile.js#L90
"samples": {
"schema": {
"stack": 0,
"time": 1,
"responsiveness": 2,
},
"data": self.samples
},
# https://github.com/firefox-devtools/profiler/blob/53970305b51b9b472e26d7457fee1d66cd4e2737/src/types/gecko-profile.js#L156
"frameTable": {
"schema": {
"location": 0,
"relevantForJS": 1,
"innerWindowID": 2,
"implementation": 3,
"optimizations": 4,
"line": 5,
"column": 6,
"category": 7,
"subcategory": 8,
},
"data": self.frameTable,
},
# https://github.com/firefox-devtools/profiler/blob/53970305b51b9b472e26d7457fee1d66cd4e2737/src/types/gecko-profile.js#L216
"stackTable": {
"schema": {
"prefix": 0,
"frame": 1,
"category": 2,
},
"data": self.stackTable,
},
"stringTable": self.stringTable,
"registerTime": 0,
"unregisterTime": None,
"processType": "default",
} | python | 15 | 0.54275 | 136 | 38.591716 | 169 | A builder for a profile of a single thread.
Attributes:
comm: Thread command-line (name).
pid: process ID of containing process.
tid: thread ID.
samples: Timeline of profile samples.
frameTable: interned stack frame ID -> stack frame.
stringTable: interned string ID -> string.
stringMap: interned string -> string ID.
stackTable: interned stack ID -> stack.
stackMap: (stack prefix ID, leaf stack frame ID) -> interned Stack ID.
frameMap: Stack Frame string -> interned Frame ID.
| class |
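The Thread builder above relies on interning: each distinct string, frame and stack is stored once and referenced by index afterwards. Here is a standalone sketch of the same idea for strings only; the real builder applies it to frames and stacks as well.

def intern(value, table, index):
    """Return the id of value, appending it to table the first time it is seen."""
    if value in index:
        return index[value]
    new_id = len(table)
    table.append(value)
    index[value] = new_id
    return new_id

strings, string_index = [], {}
ids = [intern(s, strings, string_index) for s in ["main", "foo", "main", "bar", "foo"]]
print(ids)      # [0, 1, 0, 2, 1]
print(strings)  # ['main', 'foo', 'bar']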
class FuzzingProtocol:
"""
Common mixin-base class for fuzzing server and client protocols.
"""
MAX_WIRE_LOG_DATA = 256
def connectionMade(self):
self.case = None
self.runCase = None
self.caseAgent = None
self.caseStarted = None
self.caseStart = 0
self.caseEnd = 0
## wire log
##
self.createWirelog = True
self.wirelog = []
## stats for octets and frames
##
self.createStats = True
self.rxOctetStats = {}
self.rxFrameStats = {}
self.txOctetStats = {}
self.txFrameStats = {}
def connectionLost(self, reason):
if self.runCase:
self.runCase.onConnectionLost(self.failedByMe)
self.caseEnd = time.time()
caseResult = {"case": self.case,
"id": caseClasstoId(self.Case),
"description": self.Case.DESCRIPTION,
"expectation": self.Case.EXPECTATION,
"agent": self.caseAgent,
"started": self.caseStarted,
"duration": int(round(1000. * (self.caseEnd - self.caseStart))), # case execution time in ms
"reportTime": self.runCase.reportTime, # True/False switch to control report output of duration
"behavior": self.runCase.behavior,
"behaviorClose": self.runCase.behaviorClose,
"expected": self.runCase.expected,
"expectedClose": self.runCase.expectedClose,
"received": self.runCase.received,
"result": self.runCase.result,
"resultClose": self.runCase.resultClose,
"wirelog": self.wirelog,
"createWirelog": self.createWirelog,
"closedByMe": self.closedByMe,
"failedByMe": self.failedByMe,
"droppedByMe": self.droppedByMe,
"wasClean": self.wasClean,
"wasNotCleanReason": self.wasNotCleanReason,
"wasServerConnectionDropTimeout": self.wasServerConnectionDropTimeout,
"wasCloseHandshakeTimeout": self.wasCloseHandshakeTimeout,
"localCloseCode": self.localCloseCode,
"localCloseReason": self.localCloseReason,
"remoteCloseCode": self.remoteCloseCode,
"remoteCloseReason": self.remoteCloseReason,
"isServer": self.isServer,
"createStats": self.createStats,
"rxOctetStats": self.rxOctetStats,
"rxFrameStats": self.rxFrameStats,
"txOctetStats": self.txOctetStats,
"txFrameStats": self.txFrameStats,
"httpRequest": self.http_request_data,
"httpResponse": self.http_response_data}
self.factory.logCase(caseResult)
# parent's connectionLost does useful things
WebSocketProtocol.connectionLost(self,reason)
def binLogData(self, data):
if len(data) > FuzzingProtocol.MAX_WIRE_LOG_DATA:
dd = binascii.b2a_hex(data[:FuzzingProtocol.MAX_WIRE_LOG_DATA]) + " ..."
else:
dd = binascii.b2a_hex(data)
return dd
def asciiLogData(self, data):
if len(data) > FuzzingProtocol.MAX_WIRE_LOG_DATA:
dd = data[:FuzzingProtocol.MAX_WIRE_LOG_DATA] + " ..."
else:
dd = data
return dd
def enableWirelog(self, enable):
if enable != self.createWirelog:
self.createWirelog = enable
self.wirelog.append(("WLM", enable))
def logRxOctets(self, data):
if self.createStats:
l = len(data)
self.rxOctetStats[l] = self.rxOctetStats.get(l, 0) + 1
if self.createWirelog:
d = str(buffer(data))
self.wirelog.append(("RO", self.binLogData(d)))
else:
WebSocketProtocol.logRxOctets(self, data)
def logTxOctets(self, data, sync):
if self.createStats:
l = len(data)
self.txOctetStats[l] = self.txOctetStats.get(l, 0) + 1
if self.createWirelog:
d = str(buffer(data))
self.wirelog.append(("TO", self.binLogData(d), sync))
else:
WebSocketProtocol.logTxOctets(self, data, sync)
def logRxFrame(self, fin, rsv, opcode, masked, payload_len, mask, payload):
if self.createStats:
self.rxFrameStats[opcode] = self.rxFrameStats.get(opcode, 0) + 1
if self.createWirelog:
d = str(buffer(payload))
self.wirelog.append(("RF", self.asciiLogData(d), opcode, fin, rsv, masked, mask))
else:
WebSocketProtocol.logRxFrame(self, fin, rsv, opcode, masked, payload_len, mask, payload)
def logTxFrame(self, opcode, payload, fin, rsv, mask, payload_len, chopsize, sync):
if self.createStats:
self.txFrameStats[opcode] = self.txFrameStats.get(opcode, 0) + 1
if self.createWirelog:
d = str(buffer(payload))
self.wirelog.append(("TF", self.asciiLogData(d), opcode, fin, rsv, mask, payload_len, chopsize, sync))
else:
WebSocketProtocol.logTxFrame(self, opcode, payload, fin, rsv, mask, payload_len, chopsize, sync)
def executeContinueLater(self, fun, tag):
if self.state != WebSocketProtocol.STATE_CLOSED:
self.wirelog.append(("CTE", tag))
fun()
else:
pass # connection already gone
def continueLater(self, delay, fun, tag = None):
self.wirelog.append(("CT", delay, tag))
reactor.callLater(delay, self.executeContinueLater, fun, tag)
def executeKillAfter(self):
if self.state != WebSocketProtocol.STATE_CLOSED:
self.wirelog.append(("KLE", ))
self.failConnection()
else:
pass # connection already gone
def killAfter(self, delay):
self.wirelog.append(("KL", delay))
reactor.callLater(delay, self.executeKillAfter)
def executeCloseAfter(self):
if self.state != WebSocketProtocol.STATE_CLOSED:
self.wirelog.append(("TIE", ))
self.sendClose()
else:
pass # connection already gone
def closeAfter(self, delay):
self.wirelog.append(("TI", delay))
reactor.callLater(delay, self.executeCloseAfter)
def onOpen(self):
if self.runCase:
cc_id = caseClasstoId(self.runCase.__class__)
if checkAgentCaseExclude(self.factory.specExcludeAgentCases, self.caseAgent, cc_id):
print "Skipping test case %s for agent %s by test configuration!" % (cc_id, self.caseAgent)
self.runCase = None
self.sendClose()
return
else:
self.caseStart = time.time()
self.runCase.onOpen()
elif self.path == "/updateReports":
self.factory.createReports()
self.sendClose()
elif self.path == "/getCaseCount":
self.sendMessage(json.dumps(len(self.factory.specCases)))
self.sendClose()
else:
pass
def onPong(self, payload):
if self.runCase:
self.runCase.onPong(payload)
else:
if self.debug:
log.msg("Pong received: " + payload)
def onClose(self, wasClean, code, reason):
if self.runCase:
self.runCase.onClose(wasClean, code, reason)
else:
if self.debug:
log.msg("Close received: %s - %s" % (code, reason))
def onMessage(self, msg, binary):
if self.runCase:
self.runCase.onMessage(msg, binary)
else:
if binary:
raise Exception("binary command message")
else:
try:
obj = json.loads(msg)
except:
raise Exception("could not parse command")
## send one frame as specified
##
if obj[0] == "sendframe":
pl = obj[1].get("payload", "")
self.sendFrame(opcode = obj[1]["opcode"],
payload = pl.encode("UTF-8"),
fin = obj[1].get("fin", True),
rsv = obj[1].get("rsv", 0),
mask = obj[1].get("mask", None),
payload_len = obj[1].get("payload_len", None),
chopsize = obj[1].get("chopsize", None),
sync = obj[1].get("sync", False))
## send multiple frames as specified
##
elif obj[0] == "sendframes":
frames = obj[1]
for frame in frames:
pl = frame.get("payload", "")
self.sendFrame(opcode = frame["opcode"],
payload = pl.encode("UTF-8"),
fin = frame.get("fin", True),
rsv = frame.get("rsv", 0),
mask = frame.get("mask", None),
payload_len = frame.get("payload_len", None),
chopsize = frame.get("chopsize", None),
sync = frame.get("sync", False))
## send close
##
elif obj[0] == "close":
spec = obj[1]
self.sendClose(spec.get("code", None), spec.get("reason", None))
## echo argument
##
elif obj[0] == "echo":
spec = obj[1]
self.sendFrame(opcode = 1, payload = spec.get("payload", ""), payload_len = spec.get("payload_len", None))
else:
raise Exception("fuzzing peer received unknown command" % obj[0]) | python | 23 | 0.523828 | 121 | 34 | 281 |
Common mixin-base class for fuzzing server and client protocols.
| class |
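onMessage above drives the fuzzer through a small JSON command protocol: each text message is a two-element array of [command, spec]. A sketch of what a peer might send (the values are illustrative):

import json

send_frame_cmd = json.dumps(["sendframe", {
    "opcode": 1,        # text frame
    "payload": "Hello",
    "fin": True,
    "chopsize": 1,      # deliver the frame one octet at a time
}])
close_cmd = json.dumps(["close", {"code": 1000, "reason": "done"}])
print(send_frame_cmd)
print(close_cmd)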
class FuzzingFactory:
"""
Common mixin-base class for fuzzing server and client protocol factory.
"""
MAX_CASE_PICKLE_LEN = 1000
def __init__(self, debug = False, outdir = "reports"):
self.repeatAgentRowPerSubcategory = True
self.debug = debug
self.outdir = outdir
self.agents = {}
self.cases = {}
def logCase(self, caseResults):
"""
Called from FuzzingProtocol instances when a case has finished, to store the case results.
"""
agent = caseResults["agent"]
case = caseResults["id"]
## index by agent->case
##
if not self.agents.has_key(agent):
self.agents[agent] = {}
self.agents[agent][case] = caseResults
## index by case->agent
##
if not self.cases.has_key(case):
self.cases[case] = {}
self.cases[case][agent] = caseResults
def createReports(self):
"""
Create reports from all data stored for test cases which have been executed.
"""
## create output directory when non-existent
##
if not os.path.exists(self.outdir):
os.makedirs(self.outdir)
## create master report
##
self.createMasterReport(self.outdir)
## create case detail reports
##
for agentId in self.agents:
for caseId in self.agents[agentId]:
self.createAgentCaseReport(agentId, caseId, self.outdir)
def cleanForFilename(self, str):
"""
Clean a string for use as filename.
"""
s0 = ''.join([c if c in "abcdefghjiklmnopqrstuvwxyz0123456789" else " " for c in str.strip().lower()])
s1 = s0.strip()
s2 = s1.replace(' ', '_')
return s2
def makeAgentCaseReportFilename(self, agentId, caseId):
"""
Create filename for case detail report from agent and case.
"""
c = caseId.replace('.', '_')
return self.cleanForFilename(agentId) + "_case_" + c + ".html"
def limitString(self, s, limit, indicator = " ..."):
ss = str(s)
if len(ss) > limit - len(indicator):
return ss[:limit - len(indicator)] + indicator
else:
return ss
def createMasterReport(self, outdir):
"""
Create report master HTML file.
:param outdir: Directory where to create file.
:type outdir: str
:returns: str -- Name of created file.
"""
## open report file in create / write-truncate mode
##
report_filename = "index.html"
f = open(os.path.join(outdir, report_filename), 'w')
## write HTML
##
f.write('<!DOCTYPE html>\n')
f.write('<html>\n')
f.write(' <head>\n')
f.write(' <meta charset="utf-8" />\n')
f.write(' <style lang="css">%s</style>\n' % CSS_COMMON)
f.write(' <style lang="css">%s</style>\n' % CSS_MASTER_REPORT)
f.write(' <script language="javascript">%s</script>\n' % JS_MASTER_REPORT % {"agents_cnt": len(self.agents.keys())})
f.write(' </head>\n')
f.write(' <body>\n')
f.write(' <a href="#"><div id="toggle_button" class="unselectable" onclick="toggleClose();">Toggle Details</div></a>\n')
f.write(' <a name="top"></a>\n')
f.write(' <br/>\n')
## top logos
f.write(' <center><img src="http://www.tavendo.de/static/autobahn/ws_protocol_test_report.png" border="0" width="820" height="46" alt="WebSockets Protocol Test Report"></img></a></center>\n')
f.write(' <center><a href="http://www.tavendo.de/autobahn" title="Autobahn WebSockets"><img src="http://www.tavendo.de/static/autobahn/ws_protocol_test_report_autobahn.png" border="0" width="300" height="68" alt="Autobahn WebSockets"></img></a></center>\n')
## write report header
##
f.write(' <div id="master_report_header" class="block">\n')
f.write(' <p id="intro">Summary report generated on %s (UTC) by <a href="%s">Autobahn WebSockets</a> v%s.</p>\n' % (utcnow(), "http://www.tavendo.de/autobahn", str(autobahn.version)))
f.write("""
<table id="case_outcome_desc">
<tr>
<td class="case_ok">Pass</td>
<td class="outcome_desc">Test case was executed and passed successfully.</td>
</tr>
<tr>
<td class="case_non_strict">Non-Strict</td>
<td class="outcome_desc">Test case was executed and passed non-strictly.
A non-strict behavior is one that does not adhere to a SHOULD-behavior as described in the protocol specification or
a well-defined, canonical behavior that appears to be desirable but left open in the protocol specification.
An implementation with non-strict behavior is still conformant to the protocol specification.</td>
</tr>
<tr>
<td class="case_failed">Fail</td>
<td class="outcome_desc">Test case was executed and failed. An implementation which fails a test case - other
than a performance/limits related one - is non-conforming to a MUST-behavior as described in the protocol specification.</td>
</tr>
<tr>
<td class="case_info">Info</td>
<td class="outcome_desc">Informational test case which detects certain implementation behavior left unspecified by the spec
but nevertheless potentially interesting to implementors.</td>
</tr>
<tr>
<td class="case_missing">Missing</td>
<td class="outcome_desc">Test case is missing, either because it was skipped via the test suite configuration
or deactivated, i.e. because the implementation does not implement the tested feature or breaks during running
the test case.</td>
</tr>
</table>
""")
f.write(' </div>\n')
## write big agent/case report table
##
f.write(' <table id="agent_case_results">\n')
## sorted list of agents for which test cases where run
##
agentList = sorted(self.agents.keys())
## create list ordered list of case Ids
##
cl = []
for c in Cases:
t = caseClasstoIdTuple(c)
cl.append((t, caseIdTupletoId(t)))
cl = sorted(cl)
caseList = []
for c in cl:
caseList.append(c[1])
lastCaseCategory = None
lastCaseSubCategory = None
for caseId in caseList:
caseCategoryIndex = caseId.split('.')[0]
caseCategory = CaseCategories.get(caseCategoryIndex, "Misc")
caseSubCategoryIndex = '.'.join(caseId.split('.')[:2])
caseSubCategory = CaseSubCategories.get(caseSubCategoryIndex, None)
## Category/Agents row
##
if caseCategory != lastCaseCategory or (self.repeatAgentRowPerSubcategory and caseSubCategory != lastCaseSubCategory):
f.write(' <tr class="case_category_row">\n')
f.write(' <td class="case_category">%s %s</td>\n' % (caseCategoryIndex, caseCategory))
for agentId in agentList:
f.write(' <td class="agent close_flex" colspan="2">%s</td>\n' % agentId)
f.write(' </tr>\n')
lastCaseCategory = caseCategory
lastCaseSubCategory = None
## Subcategory row
##
if caseSubCategory != lastCaseSubCategory:
f.write(' <tr class="case_subcategory_row">\n')
f.write(' <td class="case_subcategory" colspan="%d">%s %s</td>\n' % (len(agentList) * 2 + 1, caseSubCategoryIndex, caseSubCategory))
f.write(' </tr>\n')
lastCaseSubCategory = caseSubCategory
## Cases row
##
f.write(' <tr class="agent_case_result_row">\n')
f.write(' <td class="case"><a href="#case_desc_%s">Case %s</a></td>\n' % (caseId.replace('.', '_'), caseId))
## Case results
##
for agentId in agentList:
if self.agents[agentId].has_key(caseId):
case = self.agents[agentId][caseId]
agent_case_report_file = self.makeAgentCaseReportFilename(agentId, caseId)
if case["behavior"] == Case.OK:
td_text = "Pass"
td_class = "case_ok"
elif case["behavior"] == Case.NON_STRICT:
td_text = "Non-Strict"
td_class = "case_non_strict"
elif case["behavior"] == Case.NO_CLOSE:
td_text = "No Close"
td_class = "case_no_close"
elif case["behavior"] == Case.INFORMATIONAL:
td_text = "Info"
td_class = "case_info"
else:
td_text = "Fail"
td_class = "case_failed"
if case["behaviorClose"] == Case.OK:
ctd_text = "%s" % str(case["remoteCloseCode"])
ctd_class = "case_ok"
elif case["behaviorClose"] == Case.FAILED_BY_CLIENT:
ctd_text = "%s" % str(case["remoteCloseCode"])
ctd_class = "case_almost"
elif case["behaviorClose"] == Case.WRONG_CODE:
ctd_text = "%s" % str(case["remoteCloseCode"])
ctd_class = "case_non_strict"
elif case["behaviorClose"] == Case.UNCLEAN:
ctd_text = "Unclean"
ctd_class = "case_failed"
elif case["behaviorClose"] == Case.INFORMATIONAL:
ctd_text = "%s" % str(case["remoteCloseCode"])
ctd_class = "case_info"
else:
ctd_text = "Fail"
ctd_class = "case_failed"
if case["reportTime"]:
f.write(' <td class="%s"><a href="%s">%s</a><br/><span class="case_duration">%s ms</span></td><td class="close close_hide %s"><span class="close_code">%s</span></td>\n' % (td_class, agent_case_report_file, td_text, case["duration"],ctd_class,ctd_text))
else:
f.write(' <td class="%s"><a href="%s">%s</a></td><td class="close close_hide %s"><span class="close_code">%s</span></td>\n' % (td_class, agent_case_report_file, td_text,ctd_class,ctd_text))
else:
f.write(' <td class="case_missing close_flex" colspan="2">Missing</td>\n')
f.write(" </tr>\n")
f.write(" </table>\n")
f.write(" <br/><hr/>\n")
## Case descriptions
##
f.write(' <div id="test_case_descriptions">\n')
for caseId in caseList:
CCase = CasesById[caseId]
f.write(' <br/>\n')
f.write(' <a name="case_desc_%s"></a>\n' % caseId.replace('.', '_'))
f.write(' <h2>Case %s</h2>\n' % caseId)
f.write(' <a class="up" href="#top">Up</a>\n')
f.write(' <p class="case_text_block case_desc"><b>Case Description</b><br/><br/>%s</p>\n' % CCase.DESCRIPTION)
f.write(' <p class="case_text_block case_expect"><b>Case Expectation</b><br/><br/>%s</p>\n' % CCase.EXPECTATION)
f.write(' </div>\n')
f.write(" <br/><hr/>\n")
## end of HTML
##
f.write(" </body>\n")
f.write("</html>\n")
## close created HTML file and return filename
##
f.close()
return report_filename
def createAgentCaseReport(self, agentId, caseId, outdir):
"""
Create case detail report HTML file.
:param agentId: ID of agent for which to generate report.
:type agentId: str
:param caseId: ID of case for which to generate report.
:type caseId: str
:param outdir: Directory where to create file.
:type outdir: str
:returns: str -- Name of created file.
"""
if not self.agents.has_key(agentId):
raise Exception("no test data stored for agent %s" % agentId)
if not self.agents[agentId].has_key(caseId):
raise Exception("no test data stored for case %s with agent %s" % (caseId, agentId))
## get case to generate report for
##
case = self.agents[agentId][caseId]
## open report file in create / write-truncate mode
##
report_filename = self.makeAgentCaseReportFilename(agentId, caseId)
f = open(os.path.join(outdir, report_filename), 'w')
## write HTML
##
f.write('<!DOCTYPE html>\n')
f.write('<html>\n')
f.write(' <head>\n')
f.write(' <meta charset="utf-8" />\n')
f.write(' <style lang="css">%s</style>\n' % CSS_COMMON)
f.write(' <style lang="css">%s</style>\n' % CSS_DETAIL_REPORT)
f.write(' </head>\n')
f.write(' <body>\n')
f.write(' <a name="top"></a>\n')
f.write(' <br/>\n')
## top logos
f.write(' <center><img src="http://www.tavendo.de/static/autobahn/ws_protocol_test_report.png" border="0" width="820" height="46" alt="WebSockets Protocol Test Report"></img></a></center>\n')
f.write(' <center><a href="http://www.tavendo.de/autobahn" title="Autobahn WebSockets"><img src="http://www.tavendo.de/static/autobahn/ws_protocol_test_report_autobahn.png" border="0" width="300" height="68" alt="Autobahn WebSockets"></img></a></center>\n')
f.write(' <br/>\n')
## Case Summary
##
if case["behavior"] == Case.OK:
style = "case_ok"
text = "Pass"
elif case["behavior"] == Case.NON_STRICT:
style = "case_non_strict"
text = "Non-Strict"
elif case["behavior"] == Case.INFORMATIONAL:
style = "case_info"
text = "Informational"
else:
style = "case_failed"
text = "Fail"
f.write(' <p class="case %s">%s - <span style="font-size: 1.3em;"><b>Case %s</b></span> : %s - <span style="font-size: 0.9em;"><b>%d</b> ms @ %s</a></p>\n' % (style, case["agent"], caseId, text, case["duration"], case["started"]))
## Case Description, Expectation, Outcome, Case Closing Behavior
##
f.write(' <p class="case_text_block case_desc"><b>Case Description</b><br/><br/>%s</p>\n' % case["description"])
f.write(' <p class="case_text_block case_expect"><b>Case Expectation</b><br/><br/>%s</p>\n' % case["expectation"])
f.write("""
<p class="case_text_block case_outcome">
<b>Case Outcome</b><br/><br/>%s<br/><br/>
<i>Expected:</i><br/><span class="case_pickle">%s</span><br/><br/>
<i>Observed:</i><br><span class="case_pickle">%s</span>
</p>\n""" % (case.get("result", ""), self.limitString(case.get("expected", ""), FuzzingFactory.MAX_CASE_PICKLE_LEN), self.limitString(case.get("received", ""), FuzzingFactory.MAX_CASE_PICKLE_LEN)))
f.write(' <p class="case_text_block case_closing_beh"><b>Case Closing Behavior</b><br/><br/>%s (%s)</p>\n' % (case.get("resultClose", ""), case.get("behaviorClose", "")))
f.write(" <br/><hr/>\n")
## Opening Handshake
##
f.write(' <h2>Opening Handshake</h2>\n')
f.write(' <pre class="http_dump">%s</pre>\n' % case["httpRequest"].strip())
f.write(' <pre class="http_dump">%s</pre>\n' % case["httpResponse"].strip())
f.write(" <br/><hr/>\n")
## Closing Behavior
##
cbv = [("isServer", "True, iff I (the fuzzer) am a server, and the peer is a client."),
("closedByMe", "True, iff I have initiated closing handshake (that is, did send close first)."),
("failedByMe", "True, iff I have failed the WS connection (i.e. due to protocol error). Failing can be either by initiating closing handshake or brutal drop TCP."),
("droppedByMe", "True, iff I dropped the TCP connection."),
("wasClean", "True, iff full WebSockets closing handshake was performed (close frame sent and received) _and_ the server dropped the TCP (which is its responsibility)."),
("wasNotCleanReason", "When wasClean == False, the reason what happened."),
("wasServerConnectionDropTimeout", "When we are a client, and we expected the server to drop the TCP, but that didn't happen in time, this gets True."),
("wasCloseHandshakeTimeout", "When we initiated a closing handshake, but the peer did not respond in time, this gets True."),
("localCloseCode", "The close code I sent in close frame (if any)."),
("localCloseReason", "The close reason I sent in close frame (if any)."),
("remoteCloseCode", "The close code the peer sent me in close frame (if any)."),
("remoteCloseReason", "The close reason the peer sent me in close frame (if any).")
]
f.write(' <h2>Closing Behavior</h2>\n')
f.write(' <table>\n')
f.write(' <tr class="stats_header"><td>Key</td><td class="left">Value</td><td class="left">Description</td></tr>\n')
for c in cbv:
f.write(' <tr class="stats_row"><td>%s</td><td class="left">%s</td><td class="left">%s</td></tr>\n' % (c[0], case[c[0]], c[1]))
f.write(' </table>')
f.write(" <br/><hr/>\n")
## Wire Statistics
##
f.write(' <h2>Wire Statistics</h2>\n')
if not case["createStats"]:
f.write(' <p style="margin-left: 40px; color: #f00;"><i>Statistics for octets/frames disabled!</i></p>\n')
else:
## octet stats
##
for statdef in [("Received", case["rxOctetStats"]), ("Transmitted", case["txOctetStats"])]:
f.write(' <h3>Octets %s by Chop Size</h3>\n' % statdef[0])
f.write(' <table>\n')
stats = statdef[1]
total_cnt = 0
total_octets = 0
f.write(' <tr class="stats_header"><td>Chop Size</td><td>Count</td><td>Octets</td></tr>\n')
for s in sorted(stats.keys()):
f.write(' <tr class="stats_row"><td>%d</td><td>%d</td><td>%d</td></tr>\n' % (s, stats[s], s * stats[s]))
total_cnt += stats[s]
total_octets += s * stats[s]
f.write(' <tr class="stats_total"><td>Total</td><td>%d</td><td>%d</td></tr>\n' % (total_cnt, total_octets))
f.write(' </table>\n')
## frame stats
##
for statdef in [("Received", case["rxFrameStats"]), ("Transmitted", case["txFrameStats"])]:
f.write(' <h3>Frames %s by Opcode</h3>\n' % statdef[0])
f.write(' <table>\n')
stats = statdef[1]
total_cnt = 0
f.write(' <tr class="stats_header"><td>Opcode</td><td>Count</td></tr>\n')
for s in sorted(stats.keys()):
f.write(' <tr class="stats_row"><td>%d</td><td>%d</td></tr>\n' % (s, stats[s]))
total_cnt += stats[s]
f.write(' <tr class="stats_total"><td>Total</td><td>%d</td></tr>\n' % (total_cnt))
f.write(' </table>\n')
f.write(" <br/><hr/>\n")
## Wire Log
##
f.write(' <h2>Wire Log</h2>\n')
if not case["createWirelog"]:
f.write(' <p style="margin-left: 40px; color: #f00;"><i>Wire log after handshake disabled!</i></p>\n')
f.write(' <div id="wirelog">\n')
wl = case["wirelog"]
i = 0
for t in wl:
if t[0] == "RO":
prefix = "RX OCTETS"
css_class = "wirelog_rx_octets"
elif t[0] == "TO":
prefix = "TX OCTETS"
if t[2]:
css_class = "wirelog_tx_octets_sync"
else:
css_class = "wirelog_tx_octets"
elif t[0] == "RF":
prefix = "RX FRAME "
css_class = "wirelog_rx_frame"
elif t[0] == "TF":
prefix = "TX FRAME "
if t[8] or t[7] is not None:
css_class = "wirelog_tx_frame_sync"
else:
css_class = "wirelog_tx_frame"
elif t[0] in ["CT", "CTE", "KL", "KLE", "TI", "TIE", "WLM"]:
pass
else:
raise Exception("logic error (unrecognized wire log row type %s - row %s)" % (t[0], str(t)))
if t[0] in ["RO", "TO", "RF", "TF"]:
lines = textwrap.wrap(t[1], 100)
if t[0] in ["RO", "TO"]:
if len(lines) > 0:
f.write(' <pre class="%s">%03d %s: %s</pre>\n' % (css_class, i, prefix, lines[0]))
for ll in lines[1:]:
f.write(' <pre class="%s">%s%s</pre>\n' % (css_class, (2+4+len(prefix))*" ", ll))
else:
if t[0] == "RF":
if t[6]:
mmask = binascii.b2a_hex(t[6])
else:
mmask = str(t[6])
f.write(' <pre class="%s">%03d %s: OPCODE=%s, FIN=%s, RSV=%s, MASKED=%s, MASK=%s</pre>\n' % (css_class, i, prefix, str(t[2]), str(t[3]), str(t[4]), str(t[5]), mmask))
elif t[0] == "TF":
f.write(' <pre class="%s">%03d %s: OPCODE=%s, FIN=%s, RSV=%s, MASK=%s, PAYLOAD-REPEAT-LEN=%s, CHOPSIZE=%s, SYNC=%s</pre>\n' % (css_class, i, prefix, str(t[2]), str(t[3]), str(t[4]), str(t[5]), str(t[6]), str(t[7]), str(t[8])))
else:
raise Exception("logic error")
for ll in lines:
f.write(' <pre class="%s">%s%s</pre>\n' % (css_class, (2+4+len(prefix))*" ", ll))
elif t[0] == "WLM":
if t[1]:
f.write(' <pre class="wirelog_delay">%03d WIRELOG ENABLED</pre>\n' % (i))
else:
f.write(' <pre class="wirelog_delay">%03d WIRELOG DISABLED</pre>\n' % (i))
elif t[0] == "CT":
f.write(' <pre class="wirelog_delay">%03d DELAY %f sec for TAG %s</pre>\n' % (i, t[1], t[2]))
elif t[0] == "CTE":
f.write(' <pre class="wirelog_delay">%03d DELAY TIMEOUT on TAG %s</pre>\n' % (i, t[1]))
elif t[0] == "KL":
f.write(' <pre class="wirelog_kill_after">%03d FAIL CONNECTION AFTER %f sec</pre>\n' % (i, t[1]))
elif t[0] == "KLE":
f.write(' <pre class="wirelog_kill_after">%03d FAILING CONNECTION</pre>\n' % (i))
elif t[0] == "TI":
f.write(' <pre class="wirelog_kill_after">%03d CLOSE CONNECTION AFTER %f sec</pre>\n' % (i, t[1]))
elif t[0] == "TIE":
f.write(' <pre class="wirelog_kill_after">%03d CLOSING CONNECTION</pre>\n' % (i))
else:
raise Exception("logic error (unrecognized wire log row type %s - row %s)" % (t[0], str(t)))
i += 1
if case["droppedByMe"]:
f.write(' <pre class="wirelog_tcp_closed_by_me">%03d TCP DROPPED BY ME</pre>\n' % i)
else:
f.write(' <pre class="wirelog_tcp_closed_by_peer">%03d TCP DROPPED BY PEER</pre>\n' % i)
f.write(' </div>\n')
f.write(" <br/><hr/>\n")
## end of HTML
##
f.write(" </body>\n")
f.write("</html>\n")
## close created HTML file and return filename
##
f.close()
return report_filename | python | 25 | 0.515679 | 281 | 42.132841 | 542 |
Common mixin-base class for fuzzing server and client protocol factory.
| class |
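logCase above indexes every result twice, once by agent and once by case, so the report generators can iterate either way. A tiny standalone sketch of that bookkeeping:

def log_case(agents, cases, result):
    agent, case = result["agent"], result["id"]
    agents.setdefault(agent, {})[case] = result
    cases.setdefault(case, {})[agent] = result

agents, cases = {}, {}
log_case(agents, cases, {"agent": "ClientA", "id": "1.1.1", "behavior": "OK"})
log_case(agents, cases, {"agent": "ClientB", "id": "1.1.1", "behavior": "OK"})
print(sorted(agents))          # ['ClientA', 'ClientB']
print(sorted(cases["1.1.1"]))  # ['ClientA', 'ClientB']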
class JTOPGUI:
"""
The easiest way to use curses is to use a wrapper around a main function.
Essentially, what goes in the main function is the body of your program;
the `stdscr` parameter passed to it is the curses screen generated by our
wrapper.
"""
COLORS = {"RED": 1, "GREEN": 2, "YELLOW": 3, "BLUE": 4, "MAGENTA": 5, "CYAN": 6}
def __init__(self, stdscr, jetson, pages, init_page=0, start=True, loop=False, seconds=5):
# Define pairing colors
curses.init_pair(1, curses.COLOR_RED, curses.COLOR_BLACK)
curses.init_pair(2, curses.COLOR_GREEN, curses.COLOR_BLACK)
curses.init_pair(3, curses.COLOR_YELLOW, curses.COLOR_BLACK)
curses.init_pair(4, curses.COLOR_BLUE, curses.COLOR_BLACK)
curses.init_pair(5, curses.COLOR_MAGENTA, curses.COLOR_BLACK)
curses.init_pair(6, curses.COLOR_CYAN, curses.COLOR_BLACK)
# background
curses.init_pair(7, curses.COLOR_WHITE, curses.COLOR_RED)
curses.init_pair(8, curses.COLOR_WHITE, curses.COLOR_GREEN)
curses.init_pair(9, curses.COLOR_BLACK, curses.COLOR_YELLOW)
curses.init_pair(10, curses.COLOR_WHITE, curses.COLOR_BLUE)
curses.init_pair(11, curses.COLOR_WHITE, curses.COLOR_MAGENTA)
curses.init_pair(12, curses.COLOR_WHITE, curses.COLOR_CYAN)
# Set curses reference, refresh and jetson controller
self.stdscr = stdscr
self.jetson = jetson
self.message = False
# Initialize all Object pages
self.pages = []
for obj in pages:
page = obj(stdscr, jetson)
page.setcontroller(self)
self.pages += [page]
# Set default page
self.n_page = 0
self.set(init_page)
# Initialize keyboard status
self.key = -1
self.old_key = -1
# Initialize mouse
self.mouse = ()
# Run the GUI
if start:
self.run(loop, seconds)
def run(self, loop, seconds):
# In this program, we don't want keystrokes echoed to the console,
# so we run this to disable that
curses.noecho()
# Additionally, we want to make it so that the user does not have to press
# enter to send keys to our program, so here is how we get keys instantly
curses.cbreak()
# Try to hide the cursor
if hasattr(curses, 'curs_set'):
try:
curses.curs_set(0)
except Exception:
pass
# Lastly, keys such as the arrow keys are sent as funny escape sequences to
# our program. We can make curses give us nicer values (such as curses.KEY_LEFT)
# so it is easier on us.
self.stdscr.keypad(True)
# Enable mouse mask
_, _ = curses.mousemask(curses.BUTTON1_CLICKED)
# Refreshing page curses loop
# https://stackoverflow.com/questions/54409978/python-curses-refreshing-text-with-a-loop
self.stdscr.nodelay(1)
# Using current time
old = datetime.now()
# Here is the loop of our program; we keep clearing and redrawing in this loop
while not self.events() and self.jetson.ok(spin=True):
# Draw pages
self.draw()
# Increase page automatically if loop enabled
if loop and datetime.now() - old >= timedelta(seconds=seconds):
self.increase(loop=True)
old = datetime.now()
@check_size(20, 50)
def draw(self):
# First, clear the screen
self.stdscr.erase()
# Write head of the jtop
self.header()
# Get page selected
page = self.pages[self.n_page]
# Draw the page
page.draw(self.key, self.mouse)
# Draw menu
self.menu()
# Draw the screen
self.stdscr.refresh()
# Set a timeout and read keystroke
self.stdscr.timeout(GUI_REFRESH)
def increase(self, loop=False):
# check reset
if loop and self.n_page >= len(self.pages) - 1:
idx = 0
else:
idx = self.n_page + 1
# Set the new page
self.set(idx + 1)
def decrease(self, loop=False):
# check reset
if loop and self.n_page <= 0:
idx = len(self.pages) + 1
else:
idx = self.n_page + 1
self.set(idx - 1)
def set(self, idx):
if idx <= len(self.pages) and idx > 0:
self.n_page = idx - 1
@check_curses
def header(self):
# Title script
# Reference: https://stackoverflow.com/questions/25872409/set-gnome-terminal-window-title-in-python
status = [self.jetson.board.hardware["TYPE"]]
if self.jetson.jetson_clocks is not None:
status += ["JC: {jc}".format(jc=self.jetson.jetson_clocks.status.capitalize())]
if self.jetson.nvpmodel is not None:
status += [self.jetson.nvpmodel.name.replace('MODE_', '').replace('_', ' ')]
str_xterm = ' - '.join(status)
# Print jtop basic info
set_xterm_title("jtop {name}".format(name=str_xterm))
        # Add an extra warning line when the requested refresh interval could not be applied
idx = 0
if self.jetson.interval != self.jetson.interval_user:
self.message = True
_, width = self.stdscr.getmaxyx()
self.stdscr.addstr(0, 0, ("{0:<" + str(width) + "}").format(" "), curses.color_pair(9))
user = int(self.jetson.interval_user * 1000)
interval = int(self.jetson.interval * 1000)
string_sudo = "I CANNOT SET SPEED AT {user}ms - SERVER AT {interval}ms".format(user=user, interval=interval)
self.stdscr.addstr(0, (width - len(string_sudo)) // 2, string_sudo, curses.color_pair(9))
idx = 1
# Write first line
message = "{info[machine]} - Jetpack {info[jetpack]} [L4T {info[L4T]}]".format(info=self.jetson.board.info)
self.stdscr.addstr(idx, 0, message, curses.A_BOLD)
@check_curses
def menu(self):
height, width = self.stdscr.getmaxyx()
# Set background for all menu line
self.stdscr.addstr(height - 1, 0, ("{0:<" + str(width - 1) + "}").format(" "), curses.A_REVERSE)
position = 1
for idx, page in enumerate(self.pages):
color = curses.A_NORMAL if self.n_page == idx else curses.A_REVERSE
self.stdscr.addstr(height - 1, position, str(idx + 1), color | curses.A_BOLD)
self.stdscr.addstr(height - 1, position + 1, page.name + " ", color)
position += len(page.name) + 3
self.stdscr.addstr(height - 1, position, "Q", curses.A_REVERSE | curses.A_BOLD)
self.stdscr.addstr(height - 1, position + 1, "uit ", curses.A_REVERSE)
# Author name
name_author = "Raffaello Bonghi"
self.stdscr.addstr(height - 1, width - len(name_author), name_author, curses.A_REVERSE)
def event_menu(self, mx, my):
height, _ = self.stdscr.getmaxyx()
        # Check whether the mouse event landed on the menu bar
if my == height - 1:
# Check which page
position = 1
for idx, page in enumerate(self.pages):
size = len(page.name) + 3
# Check if mouse is inside menu name
if mx >= position and mx < position + size:
# Set new page
self.set(idx + 1)
return False
# Increase counter
position += size
# Quit button
if mx >= position and mx < position + 4:
return True
return False
def events(self):
event = self.stdscr.getch()
# Run keyboard check
status_mouse = False
status_keyboard = self.keyboard(event)
# Clear event mouse
self.mouse = ()
# Check event mouse
if event == curses.KEY_MOUSE:
try:
_, mx, my, _, _ = curses.getmouse()
# Run event menu controller
status_mouse = self.event_menu(mx, my)
self.mouse = (mx, my)
except curses.error:
pass
return status_keyboard or status_mouse
def keyboard(self, event):
self.key = event
if self.old_key != self.key:
# keyboard check list
if self.key == curses.KEY_LEFT:
self.decrease(loop=True)
elif self.key == curses.KEY_RIGHT or self.key == ord('\t'):
self.increase(loop=True)
elif self.key in [ord(str(n)) for n in range(10)]:
num = int(chr(self.key))
self.set(num)
elif self.key == ord('q') or self.key == ord('Q') or self.ESC_BUTTON(self.key):
# keyboard check quit button
return True
else:
page = self.pages[self.n_page]
# Run key controller
page.keyboard(self.key)
# Store old value key
self.old_key = self.key
return False
def ESC_BUTTON(self, key):
"""
        Check whether another character follows, to tell a plain ESC press apart from an ALT + <OTHER CHR> combination
https://stackoverflow.com/questions/5977395/ncurses-and-esc-alt-keys
"""
if key == 27:
n = self.stdscr.getch()
if n == -1:
return True
return False | python | 19 | 0.559839 | 120 | 39.770563 | 231 |
The easiest way to use curses is to use a wrapper around a main function
Essentially, what goes in the main function is the body of your program,
The `stdscr' parameter passed to it is the curses screen generated by our
wrapper.
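# --- Added example (hedged, not part of the class above) -----------------------
# Minimal sketch of the curses.wrapper pattern described in the docstring: the
# wrapper creates stdscr, hands it to a main function and restores the terminal
# on exit. The jtop pages and jetson controller are intentionally omitted.
import curses

def demo_main(stdscr):
    # stdscr is the screen object generated by the wrapper
    stdscr.clear()
    stdscr.addstr(0, 0, "Hello from curses - press any key to exit")
    stdscr.refresh()
    stdscr.getch()

if __name__ == "__main__":
    curses.wrapper(demo_main)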
| class |
class ColorRule:
"""Class representing a text color rendering rule
Attributes
----------
regex : str
A python 're' module string
color : int
A valid color value. Ex. py_cui.WHITE_ON_BLACK
rule_type : str
String representing rule type. ['startswith', 'endswith', 'notstartswith', 'notendswith', 'contains']
match_type : str
String representing the match type. ['line', 'regex', 'region']
region : [int, int]
Start and end positions for the coloring, None if match_type != 'region'
include_whitespace : bool
Flag to determine whether to strip whitespace before matching.
"""
def __init__(self, regex, color, rule_type, match_type, region, include_whitespace):
"""Constructor for ColorRule object
Parameters
----------
regex : str
A python 're' module string
color : int
A valid color value. Ex. py_cui.WHITE_ON_BLACK
rule_type : str
String representing rule type. ['startswith', 'endswith', 'notstartswith', 'notendswith', 'contains']
match_type : str
String representing the match type. ['line', 'regex', 'region']
region : [int, int]
Start and end positions for the coloring, None if match_type != 'region'
include_whitespace : bool
Flag to determine whether to strip whitespace before matching.
"""
self.regex = regex
self.color = color
self.rule_type = rule_type
self.match_type = match_type
self.region = region
if self.region is not None:
if self.region[0] > self.region[1]:
temp = region[0]
self.region[0] = self.region[1]
self.region[1] = temp
self.include_whitespace = include_whitespace
def check_match(self, line):
"""Checks if the color rule matches a line
Parameters
----------
line : str
The input line of text to try to match the rule against
Returns
-------
matched : bool
True if a match was found, false otherwise
"""
temp = line
if not self.include_whitespace:
temp = temp.strip()
if self.rule_type == 'startswith':
if temp.startswith(self.regex):
return True
elif self.rule_type == 'endswith':
if temp.endswith(self.regex):
return True
elif self.rule_type == 'notstartswith':
if temp.startswith(self.regex):
return False
return True
elif self.rule_type == 'notendswith':
if temp.endswith(self.regex):
return False
return True
elif self.rule_type == 'contains':
if re.search(self.regex, line) is not None:
return True
return False
def generate_fragments_regex(self, widget, render_text):
"""Splits text into fragments based on regular expression
Parameters
----------
widget : py_cui.Widget
Widget containing the render text
render_text : str
text being rendered
Returns
-------
fragments : list of lists of [str, color]
the render text split into fragments of strings paired with colors
"""
fragments = []
matches = re.findall(self.regex, render_text)
current_render_text = render_text
for match in matches:
temp = current_render_text.split(match, 1)
if len(temp) == 2:
fragments.append([temp[0], widget.color])
fragments.append([match, self.color])
current_render_text = temp[1]
fragments.append([current_render_text, widget.color])
return fragments
def split_text_on_region(self, widget, render_text):
"""Splits text into fragments based on region
Parameters
----------
widget : py_cui.Widget
Widget containing the render text
render_text : str
text being rendered
Returns
-------
fragments : list of lists of [str, color]
the render text split into fragments of strings paired with colors
"""
fragments = []
if self.region is None or len(render_text) < self.region[0]:
return [[render_text, widget.color]]
elif len(render_text) < self.region[1]:
self.region[1] = len(render_text)
if self.region[0] != 0:
fragments.append([render_text[0:self.region[0]], widget.color])
fragments.append([render_text[self.region[0]:self.region[1]], self.color])
fragments.append([render_text[self.region[1]:], widget.color])
return fragments
def generate_fragments(self, widget, line, render_text):
"""Splits text into fragments if matched line to regex
Parameters
----------
widget : py_cui.Widget
Widget containing the render text
line : str
the line to match
render_text : str
text being rendered
Returns
-------
fragments : list of lists of [str, color]
the render text split into fragments of strings paired with colors
matched : bool
Boolean output saying if a match was found in the line.
"""
match = self.check_match(line)
if match:
if self.match_type == 'line':
return [[render_text, self.color]], True
elif self.match_type == 'regex':
return self.generate_fragments_regex(widget, render_text), True
elif self.match_type == 'region':
return self.split_text_on_region(widget, render_text), True
return [[render_text, widget.color]], False | python | 15 | 0.556737 | 113 | 33.011299 | 177 | Class representing a text color rendering rule
Attributes
----------
regex : str
A python 're' module string
color : int
A valid color value. Ex. py_cui.WHITE_ON_BLACK
rule_type : str
String representing rule type. ['startswith', 'endswith', 'notstartswith', 'notendswith', 'contains']
match_type : str
String representing the match type. ['line', 'regex', 'region']
region : [int, int]
Start and end positions for the coloring, None if match_type != 'region'
include_whitespace : bool
Flag to determine whether to strip whitespace before matching.
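# --- Added example (hedged) -----------------------------------------------------
# Sketch of how the ColorRule class above can be exercised. Assumptions: the
# class (and the 're' module it relies on) is importable, the "widget" only
# needs a .color attribute, and the integer color codes stand in for real
# py_cui color pairs.
from types import SimpleNamespace

fake_widget = SimpleNamespace(color=0)  # 0 stands in for the widget's base color
todo_rule = ColorRule(regex='TODO', color=3, rule_type='contains',
                      match_type='regex', region=None, include_whitespace=False)
line = 'x = 1  # TODO fix'
fragments, matched = todo_rule.generate_fragments(fake_widget, line, line)
# fragments -> [['x = 1  # ', 0], ['TODO', 3], [' fix', 0]], matched -> True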
| class |
class Field:
    """This Descriptor class is used to define an entity field. It validates field value types
    and applies transformations to those values to produce a normalized version of them. A Field needs
to be defined outside the constructor to be applied correctly.
An entity class that implements Field descriptor needs to implement the `named_fields`
    decorator so that Field descriptors work correctly.
Example:
.. code-block:: python
from pydbrepo import Entity, Field, named_fields
@named_fields
class Model(Entity):
name = Field(type_=str)
:type type_: Union[Type, Tuple[Type, ...]]
:param type_: Python Type or Tuple of Python Types that should have the value of the field
:type cast_to: Type
    :param cast_to: Python Type that the value should be cast to in case it does not have the correct one.
This property is used at the same time with `cast_if` param. If `cast_if` is not set, all
values will be casted to the specified type.
:type cast_if: Union[Type, Tuple[Type, ...]]
:param cast_if: Python Type or Tuple of Python Types that describes the type that the field
should be casted to if it is equal to the given value.
:type cast_items_to: Type
    :param cast_items_to: Python Type that will be used to cast all items of an Iterable object. If
        the stored value is not an iterable object and this property is set, it will cause an
error.
:type field: str
:param field: Name of the field that is attached to the descriptor
"""
def __init__(
self,
type_: Union[Type, Tuple[Type, ...]],
cast_to: Optional[Union[Type, Callable[[Any], Any]]] = None,
cast_if: Optional[Union[Type, Tuple[Type, ...]]] = None,
cast_items_to: Optional[Union[Type, Callable[[Any], Any]]] = None,
field: Optional[AnyStr] = None,
):
self.__type = type_
self.__cast_to = cast_to
self.__cast_items_to = cast_items_to
self.__cast_if = cast_if
self.field = field
def __set__(self, instance: Any, value: Any) -> NoReturn:
"""Validate and save field value.
        :param instance: Field owner class
:param value: Given value to assign
"""
self.__validate_types(instance, value)
value = self.__cast_value(instance, value)
instance.__dict__[self.field] = value
def __get__(self, instance: Any, owner_type: Type) -> Any:
"""Return saved field value.
        :param instance: Field owner class
:param owner_type: Python Type of the owner instance
:return Any: Stored value
"""
field = instance.__dict__.get(self.field, None)
if issubclass(type(field), EnumEntity):
return field.value
return field
def __validate_types(self, instance: Any, value: Any) -> NoReturn:
"""Execute type validation for field value.
        :param instance: Field owner class
:param value: Given value to assign
:raise FieldTypeError: If the value is different from expected types
"""
if not isinstance(value, self.__type) and value is not None:
raise FieldTypeError(instance.__class__.__name__, self.field, value, self.__type)
def __cast_value(self, instance: Any, value: Any) -> Any:
"""Execute cast over given value to convert it into an instance of a specific class.
        :param instance: Field owner class
:param value: Un-casted value
:return Any: Casted value
"""
if self.__cast_items_to:
return self.__cast_iterable(instance, value)
if self.__cast_to:
return self.__cast_non_iterables(instance, value)
return value
def __cast_non_iterables(self, instance: Any, value: Any) -> Any:
"""Cast non iterable object values.
        :param instance: Field owner class
:param value: Un-casted value
:return Any: Casted value
:raise FieldCastError: In case the Type of the given value do not match with the given
`cast_if` Type when `cast_if` is different from None.
"""
if self.__verify_self_type_cast(value):
return value
if self.__cast_if is not None:
if isinstance(value, self.__cast_if):
return self.__handle_cast(self.__cast_to, instance, value)
return value
return self.__handle_cast(self.__cast_to, instance, value)
    def __verify_self_type_cast(self, value: Any) -> bool:
        """Execute self type casting validations to avoid executing an unnecessary cast.
:param value: Value that should be casted.
:return bool: Assertion flag that indicates if the value should skip casting.
"""
if value is None:
return True
if isinstance(self.__cast_to, FunctionType):
return False
if isinstance(value, self.__cast_to):
return True
return False
def __cast_iterable(self, instance: Any, value: Iterable) -> Any:
"""Cast iterable object items.
        :param instance: Field owner class
:param value: Un-casted value
:return Any: Casted value
"""
if self.__cast_items_to is None or not value:
return value
items = []
for item in value:
if isinstance(item, self.__cast_items_to):
items.append(item)
item = self.__handle_cast(self.__cast_items_to, instance, item)
items.append(item)
# noinspection PyArgumentList
return type(value)(items)
def __handle_cast(self, cast_to: Type, instance: Any, value: Any) -> Any:
"""Return casted value
:param cast_to: Type to be casted
:param value: Un-casted value
:return Any: New instance of the value with the corresponding type
:raise FieldCastError: In case any cast cause an unexpected exception.
"""
try:
# Cast for string dates
if cast_to == datetime:
return parse(value)
if cast_to == date:
return parse(value).date()
if 'from_dict' in set(dir(cast_to)):
return cast_to().from_dict(value)
return cast_to(value)
except Exception as error:
raise FieldCastError(
class_name=instance.__class__.__name__,
field=self.field,
value=value,
cast_to=cast_to,
cast_if=self.__cast_if,
current_type=type(value),
errors=error
) | python | 17 | 0.597325 | 99 | 31.205742 | 209 | This Descriptor class is use to define an entity field. It validates field values types
and make transformations over that values to have a normalized version of it. Field needs
to be defined outside the constructor to be applied correctly.
An entity class that implements Field descriptor needs to implement the `named_fields`
decorator in order that Field descriptors works correctly.
Example:
.. code-block:: python
from pydbrepo import Entity, Field, named_fields
@named_fields
class Model(Entity):
name = Field(type_=str)
:type type_: Union[Type, Tuple[Type, ...]]
:param type_: Python Type or Tuple of Python Types that should have the value of the field
:type cast_to: Type
:param cast_to: Python Type that should be casted in case the value don't have the correct one.
This property is used at the same time with `cast_if` param. If `cast_if` is not set, all
values will be casted to the specified type.
:type cast_if: Union[Type, Tuple[Type, ...]]
:param cast_if: Python Type or Tuple of Python Types that describes the type that the field
should be casted to if it is equal to the given value.
:type cast_items_to: Type
:param cast_items_to: Python Type that will be used to cast all items on Iterable object. If
The stored value is not an iterable object and this property is set, it will cause an
error.
:type field: str
:param field: Name of the field that is attached to the descriptor
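# --- Added example (hedged) -----------------------------------------------------
# Sketch of the casting behaviour described above, exercising the Field
# descriptor directly on a plain class. Real pydbrepo usage goes through Entity
# and the @named_fields decorator (which fills in Field.field); here the field
# names are assigned by hand so the sketch stays self-contained.
class DemoModel:
    age = Field(type_=(int, str), cast_to=int, cast_if=str)
    tags = Field(type_=(list, type(None)), cast_items_to=str)

vars(DemoModel)['age'].field = 'age'    # normally done by @named_fields
vars(DemoModel)['tags'].field = 'tags'

demo = DemoModel()
demo.age = "42"        # str is allowed by type_, then cast to int via cast_if -> 42
demo.tags = [1, 2, 3]  # every list item is cast to str -> ['1', '2', '3']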
| class |
class GeneralMethods:
"""
    Class with feature sets and general usage methods for model building at every stage.
Attributes:
data_version (int): version for directory generation and data storage
directory (str): directory to store model data
connect: current connection to Teradata
cursor: cursor provided by connection
dmsc_col_names (list of str): list of all NO_DATA columns
meta_vars (set of str): set of meta columns
bad_vars (set of str): set of bad columns to not use them as features
kurt_sk_vars (set of str): set of kurtosis and skew columns
cat_vars (set of str): set of categorical columns with several categories
bin_vars (set of str): set of categorical columns with two categories
all_cats (set of str): set of all categorical columns (bin_vars + cat_vars)
wcat_vars (set of str): set of columns with wrong data types after download from database
vars (list of str): list of features (w/o 'age' and similar)
"""
def __init__(self, data_version: int = 1):
"""
Constructor for GeneralMethods class.
Args:
data_version: data version to specify data folder name
"""
self.data_version = data_version
self.directory = 'data_v' + str(self.data_version)
self.connect = None
self.cursor = None
self.dmsc_col_names = [
'NO_DATA']
self.meta_vars = {
'NO_DATA'}
self.bad_vars = {
'NO_DATA'}
self.kurt_sk_vars = {
'NO_DATA'}
self.cat_vars = {
'NO_DATA'}
self.bin_vars = {
'NO_DATA'}
self.all_cats = self.cat_vars.union(self.bin_vars)
self.wcat_vars = {
'NO_DATA'}
self.vars = (set(self.dmsc_col_names + ['label'])
- self.meta_vars
- self.bad_vars
- self.kurt_sk_vars)
self.vars = [var for var in self.vars if ('age' not in var)]
@timeit
def wcat_features_fix(self, dt: pd.DataFrame) -> pd.DataFrame:
"""
        Fix the wcat_vars columns present in dt: replace comma decimal separators and cast back to float32.
Args:
dt (DataFrame): pandas DataFrame to transform
Returns:
dt (DataFrame)
"""
for column in self.wcat_vars:
if column in dt.columns:
dt[column] = dt[column].astype('str')
dt[column] = dt[column].str.replace(',', '.')
dt[column] = dt[column].astype('float32')
return dt
def create_data_directory(self):
"""
        Creates the directory to store data and the model, if it does not already exist.
"""
if not os.path.exists(self.directory):
os.makedirs(self.directory)
@timeit
def establish_connection(self, dsn: str = 'NO_DATA'):
"""
Establish connection for specified dsn with autocommit = True.
"""
self.connect = connect(dsn=dsn)
self.cursor = self.connect.cursor()
self.connect.autocommit = True
@staticmethod
@timeit
def split(dt: pd.DataFrame) -> (pd.DataFrame, pd.DataFrame):
"""
Perform train/test split with reset_index = True for specified df.
Args:
dt (DataFrame): df to split
Returns:
df_train, df_test (DataFrame, DataFrame): splitted parts of df
"""
df_train, df_test = train_test_split(dt, test_size=0.20, random_state=333)
df_train.reset_index(inplace=True, drop=True)
df_test.reset_index(inplace=True, drop=True)
return df_train, df_test
@timeit
def cats_fit_transform(self, dt: pd.DataFrame, code_dict: dict, code_vars: list or set) -> (pd.DataFrame, dict):
"""
Apply label encoding fit and transform for specified df and code_vars.
Args:
dt (DataFrame): data frame to fit encoding dict and then transform
code_dict (dict): dict to store codes
code_vars (list/set): list of features to encode
Returns:
dt, code_dict (DataFrame, dict): transformed df and code_dict with stored codes
"""
dt_vars = [var for var in code_vars if var in dt.columns]
for var in dt_vars:
dt.loc[:, var] = dt[var].astype('category')
code_dict[var] = dt[var].cat.categories
dt.loc[:, var] = dt[var].cat.codes
return dt, code_dict
@timeit
def cats_transform(self, dt: pd.DataFrame, code_dict: dict) -> pd.DataFrame:
"""
Apply label encoding to df according to code_dict.
Args:
dt (DataFrame): data frame to category features transform
code_dict (dict): dict with stored codes
Returns:
dt (DataFrame): transformed data frame
"""
for var, _ in code_dict.items():
if var in dt.columns:
dt.loc[:, var] = pd.Categorical(dt[var].values, categories=code_dict[var])
dt.loc[:, var] = dt[var].cat.codes
return dt
@timeit
def download_data(self, file_path: str = '', query: str = '') -> pd.DataFrame:
"""
        Tries to read the data frame from a file; if that fails, downloads it from Teradata and then saves it to the file.
Args:
file_path (str): path including filename to download or save
query (str): SQL Teradata query
Returns:
dt (DataFrame): downloaded data frame
"""
try:
dt = pd.read_feather(file_path)
except IOError:
print('File {} not found and will be downloaded\n '.format(file_path))
self.establish_connection()
self.cursor.execute(query)
dt = pd.DataFrame(self.cursor.fetchallnumpy())
dt.columns = [col.lower() for col in dt.columns]
self.create_data_directory()
dt.to_feather(file_path)
self.connect.close()
return dt | python | 16 | 0.567496 | 116 | 33.267045 | 176 |
Class with features sets and general usage methods for model building at every stage.
Attributes:
data_version (int): version for directory generation and data storage
directory (str): directory to store model data
connect: current connection to Teradata
cursor: cursor provided by connection
dmsc_col_names (list of str): list of all NO_DATA columns
meta_vars (set of str): set of meta columns
bad_vars (set of str): set of bad columns to not use them as features
kurt_sk_vars (set of str): set of kurtosis and skew columns
cat_vars (set of str): set of categorical columns with several categories
bin_vars (set of str): set of categorical columns with two categories
all_cats (set of str): set of all categorical columns (bin_vars + cat_vars)
wcat_vars (set of str): set of columns with wrong data types after download from database
vars (list of str): list of features (w/o 'age' and similar)
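# --- Added usage sketch (hedged) -------------------------------------------------
# Outline of the intended call order for the helper class above. The DSN, file
# path and SQL query are placeholders (the real column lists are anonymised as
# 'NO_DATA'), so this is illustrative rather than a runnable pipeline.
gm = GeneralMethods(data_version=2)
df = gm.download_data(file_path='data_v2/train.feather',
                      query='SELECT * FROM some_schema.some_table')  # hypothetical query
df = gm.wcat_features_fix(df)                # repair comma-decimal columns
df_train, df_test = gm.split(df)             # 80/20 split with index reset
df_train, codes = gm.cats_fit_transform(df_train, {}, gm.cat_vars)
df_test = gm.cats_transform(df_test, codes)  # reuse the fitted category codes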
| class |
class ReportGenerator:
"""Generate and show crawler result report.
Attributes:
_report_data_dict (dict):
dictionary containing report data.
Example:
{
('01-01-2000', '07-07-2000'): 'noun1 noun2 noun3'
}
"""
REPORT_TEMPLATE = ReportTemplate(
body=(
'{col_data[0]:{col_width[0]}}{col_sep}'
'{col_data[1]:^{col_width[1]}}{col_sep}'
'{col_data[2]:^{col_width[2]}}{col_sep}'
),
rows_separator='-',
columns_separator='|'
)
REPORT_HEADER = ('Начало недели', 'Конец недели', 'Популярные слова')
def __init__(self, report_data_dict):
self._report_data_dict = report_data_dict
@cached_property
    def columns_width(self):
        """Estimate the width of each report column.
        The popular words column width equals
        the length of the longest string in report_data.values().
Returns:
(tuple): (int, int, int) - estimated columns width.
"""
week_begin_col_width = len(
self.REPORT_HEADER[0]
) + 1 # Note one whitespace at the end
week_end_col_width = len(
self.REPORT_HEADER[1]
) + 2 # Note two whitespaces at the left and right
popular_words_col_width = len(
max(self._report_data_dict.values(), key=len)
) + 2 # Note two whitespaces at the left and right
return (
week_begin_col_width,
week_end_col_width,
popular_words_col_width
)
@cached_property
def table_width(self):
return sum(
self.columns_width
) + 3 # Keep in mind 3 column separators
def _print_boarder(self):
print(self.REPORT_TEMPLATE.rows_separator * self.table_width)
def _print_header(self): # pragma: no cover
"""Print report header.
"""
header = self.REPORT_TEMPLATE.body.format(
col_data=self.REPORT_HEADER,
col_width=self.columns_width,
col_sep=self.REPORT_TEMPLATE.columns_separator
)
self._print_boarder()
print(header)
self._print_boarder()
def _print_body(self): # pragma: no cover
"""Print report body.
"""
for (
week_begin, week_end
), popular_words in self._report_data_dict.items():
body = self.REPORT_TEMPLATE.body.format(
col_data=(week_begin, week_end, popular_words),
col_width=self.columns_width,
col_sep=self.REPORT_TEMPLATE.columns_separator
)
print(body)
self._print_boarder()
def print_report(self): # pragma: no cover
"""Public interface to print a report.
"""
self._print_header()
self._print_body() | python | 16 | 0.541291 | 73 | 29.347368 | 95 | Generate and show crawler result report.
Attributes:
_report_data_dict (dict):
dictionary containing report data.
Example:
{
('01-01-2000', '07-07-2000'): 'noun1 noun2 noun3'
}
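# --- Added usage sketch (hedged) -------------------------------------------------
# The report data maps (week_begin, week_end) tuples to a space-separated string
# of popular words, exactly as the docstring example above shows.
report_data = {
    ('01-01-2000', '07-01-2000'): 'python curses report',
    ('08-01-2000', '14-01-2000'): 'proxy tunnel jwt',
}
ReportGenerator(report_data).print_report()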
| class |
class TestJwtUnattendedAccess:
    """ A test suite to verify the happy path and error handling of the oauth endpoints """
def _update_secrets(self, request):
if request.get("claims", None):
if request["claims"].get("sub", None) == "/replace_me":
request["claims"]['sub'] = self.oauth.client_id
if request["claims"].get("iss", None) == "/replace_me":
request["claims"]['iss'] = self.oauth.client_id
else:
if request.get("sub", None) == "/replace_me":
request['sub'] = self.oauth.client_id
if request.get("iis", None) == "/replace_me":
request["iis"] = self.oauth.client_id
@pytest.mark.parametrize('jwt_claims, expected_response, expected_status_code', [
# 1. Incorrect JWT algorithm using “HS256” instead of “RS512”
(
{
'kid': 'test-1',
'algorithm': 'HS256',
},
{
'error': 'invalid_request',
'error_description': "Invalid 'alg' header in JWT - unsupported JWT algorithm - must be 'RS512'"
},
400
),
# 2. Invalid “sub” & “iss” in jwt claims
(
{
'kid': 'test-1',
'claims': {
"sub": 'INVALID',
"iss": 'INVALID',
"jti": str(uuid4()),
"aud": f"{OAUTH_URL}/token",
"exp": int(time()) + 10,
}
},
{'error': 'invalid_request', 'error_description': 'Invalid iss/sub claims in JWT'},
401
),
# 3. Invalid “sub” in jwt claims and different from “iss”
(
{
'kid': 'test-1',
'claims': {
"sub": 'INVALID',
"iss": "/replace_me",
"jti": str(uuid4()),
"aud": f"{OAUTH_URL}/token",
"exp": int(time()) + 10,
}
},
{'error': 'invalid_request', 'error_description': 'Missing or non-matching iss/sub claims in JWT'},
400
),
# 4. Invalid “iss” in jwt claims and different from “sub"
(
{
'kid': 'test-1',
'claims': {
"sub": "/replace_me",
"iss": 'INVALID',
"jti": str(uuid4()),
"aud": f"{OAUTH_URL}/token",
"exp": int(time()) + 10,
}
},
{'error': 'invalid_request', 'error_description': 'Missing or non-matching iss/sub claims in JWT'},
400
),
# 5. Missing “sub” in jwt claims
(
{
'kid': 'test-1',
'claims': {
"iss": "/replace_me",
"jti": str(uuid4()),
"aud": f"{OAUTH_URL}/token",
"exp": int(time()) + 10,
}
},
{'error': 'invalid_request', 'error_description': 'Missing or non-matching iss/sub claims in JWT'},
400
),
# 6. Missing “iss” in jwt claims
(
{
'kid': 'test-1',
'claims': {
"sub": "/replace_me",
"jti": str(uuid4()),
"aud": f"{OAUTH_URL}/token",
"exp": int(time()) + 10,
}
},
{'error': 'invalid_request', 'error_description': 'Missing or non-matching iss/sub claims in JWT'},
400
),
# 7. Invalid “jti” in jwt claims e.g using an INT type instead of a STRING
(
{
'kid': 'test-1',
'claims': {
"sub": "/replace_me",
"iss": "/replace_me",
"jti": 1234567890,
"aud": f"{OAUTH_URL}/token",
"exp": int(time()) + 10,
}
},
{'error': 'invalid_request', 'error_description': 'Failed to decode JWT'},
400
),
# 8. Missing “jti” in jwt claims
(
{
'kid': 'test-1',
'claims': {
"sub": "/replace_me",
"iss": "/replace_me",
"aud": f"{OAUTH_URL}/token",
"exp": int(time()) + 10,
}
},
{'error': 'invalid_request', 'error_description': 'Missing jti claim in JWT'},
400
),
# 9. Invalid “aud” in jwt claims
(
{
'kid': 'test-1',
'claims': {
"sub": "/replace_me",
"iss": "/replace_me",
"jti": str(uuid4()),
"aud": f"{OAUTH_URL}/token" + 'INVALID',
"exp": int(time()) + 10,
}
},
{'error': 'invalid_request', 'error_description': 'Missing or invalid aud claim in JWT'},
401
),
# 10. Missing “aud” in jwt claims
(
{
'kid': 'test-1',
'claims': {
"sub": "/replace_me",
"iss": "/replace_me",
"jti": str(uuid4()),
"exp": int(time()) + 10,
}
},
{'error': 'invalid_request', 'error_description': 'Missing or invalid aud claim in JWT'},
401
),
# 11. Invalid “exp” in jwt claims e.g. using a STRING type
(
{
'kid': 'test-1',
'claims': {
"sub": "/replace_me",
"iss": "/replace_me",
"jti": str(uuid4()),
"aud": f"{OAUTH_URL}/token",
"exp": 'INVALID',
}
},
{'error': 'invalid_request', 'error_description': 'Failed to decode JWT'},
400
),
# 12. Missing “exp” in jwt claims
(
{
'kid': 'test-1',
'claims': {
"sub": "/replace_me",
"iss": "/replace_me",
"jti": str(uuid4()),
"aud": f"{OAUTH_URL}/token",
}
},
{'error': 'invalid_request', 'error_description': 'Missing exp claim in JWT'},
400
),
# 13. “Exp” in the past
(
{
'kid': 'test-1',
'claims': {
"sub": "/replace_me",
"iss": "/replace_me",
"jti": str(uuid4()),
"aud": f"{OAUTH_URL}/token",
"exp": int(time()) - 10,
}
},
{'error': 'invalid_request', 'error_description': 'Invalid exp claim in JWT - JWT has expired'},
400
),
# 14. “Exp” too far into the future (more than 5 minuets)
(
{
'kid': 'test-1',
'claims': {
"sub": "/replace_me",
"iss": "/replace_me",
"jti": str(uuid4()),
"aud": f"{OAUTH_URL}/token",
"exp": int(time()) + 330, # this includes the +30 seconds grace
}
},
{'error': 'invalid_request',
'error_description': 'Invalid exp claim in JWT - more than 5 minutes in future'},
400
)
])
@pytest.mark.apm_1521
@pytest.mark.errors
async def test_invalid_jwt_claims(self, jwt_claims, expected_response, expected_status_code, helper):
self._update_secrets(jwt_claims)
jwt = self.oauth.create_jwt(**jwt_claims)
resp = await self.oauth.get_token_response(grant_type='client_credentials', _jwt=jwt)
assert helper.check_response(resp, expected_status_code, expected_response)
@pytest.mark.apm_1521
@pytest.mark.errors
async def test_reusing_same_jti(self, helper):
jwt = self.oauth.create_jwt(claims={
"sub": self.oauth.client_id,
"iss": self.oauth.client_id,
"jti": '6cd46139-af51-4f78-b850-74fcdf70c75b',
"aud": f"{OAUTH_URL}/token",
"exp": int(time()) + 10,
},
kid="test-1",
)
resp = await self.oauth.get_token_response(grant_type='client_credentials', _jwt=jwt)
if resp['status_code'] == 200:
resp = await self.oauth.get_token_response(grant_type='client_credentials', _jwt=jwt)
assert helper.check_response(
resp, 400, {'error': 'invalid_request', 'error_description': 'Non-unique jti claim in JWT'})
@pytest.mark.happy_path
async def test_successful_jwt_token_response(self):
jwt = self.oauth.create_jwt(kid="test-1")
resp = await self.oauth.get_token_response("client_credentials", _jwt=jwt)
try:
            assert resp['body']['expires_in'] == '599', f"UNEXPECTED 'expires_in' {resp['body']['expires_in']}"
except KeyError:
print(f"UNEXPECTED RESPONSE: {resp}")
return False
assert list(resp['body'].keys()) == ['access_token', 'expires_in', 'token_type', 'issued_at'], \
f'UNEXPECTED RESPONSE: {list(resp["body"].keys())}'
@pytest.mark.apm_1521
@pytest.mark.errors
@pytest.mark.parametrize('form_data, expected_response', [
# Invalid formdata “client_assertion_type”
(
{
"client_assertion_type": "INVALID",
"grant_type": "client_credentials",
},
{
'error': 'invalid_request',
'error_description': "Missing or invalid client_assertion_type - "
"must be 'urn:ietf:params:oauth:client-assertion-type:jwt-bearer"
}
),
# Missing formdata “client_assertion_type”
(
{
"grant_type": "client_credentials",
},
{
'error': 'invalid_request',
'error_description': "Missing or invalid client_assertion_type - "
"must be 'urn:ietf:params:oauth:client-assertion-type:jwt-bearer"
}
),
# Invalid formdata “client_assertion”
(
{
"client_assertion_type": "urn:ietf:params:oauth:client-assertion-type:jwt-bearer",
"client_assertion": "INVALID",
"grant_type": "client_credentials",
},
{'error': 'invalid_request', 'error_description': 'Malformed JWT in client_assertion'}
),
# Missing formdata “client_assertion”
(
{
"client_assertion_type": "urn:ietf:params:oauth:client-assertion-type:jwt-bearer",
"grant_type": "client_credentials",
},
{'error': 'invalid_request', 'error_description': 'Missing client_assertion'}
),
# Invalid formdata “grant_type”
(
{
"client_assertion_type": "urn:ietf:params:oauth:client-assertion-type:jwt-bearer",
"grant_type": "INVALID",
},
{'error': 'unsupported_grant_type', 'error_description': 'grant_type is invalid'}
),
# Missing formdata "grant_type"
(
{
"client_assertion_type": "urn:ietf:params:oauth:client-assertion-type:jwt-bearer",
},
{
'error': 'invalid_request',
'error_description': 'grant_type is missing'
}
)
])
async def test_invalid_form_data(self, form_data, expected_response):
jwt = self.oauth.create_jwt(kid="test-1")
resp = await self.oauth.get_token_response("client_credentials", _jwt=jwt, data=form_data)
assert resp['status_code'] == 400
assert resp['body'] == expected_response
@pytest.mark.apm_1521
@pytest.mark.errors
@pytest.mark.parametrize('jwt_details, expected_response, expected_status_code', [
# Invalid KID
(
{
'kid': 'INVALID',
},
{'error': 'invalid_request', 'error_description': "Invalid 'kid' header in JWT - no matching public key"},
401
),
# Missing KID Header
(
{
'kid': None,
},
{'error': 'invalid_request', 'error_description': "Missing 'kid' header in JWT"},
400
),
])
async def test_invalid_jwt(self, jwt_details, expected_response, expected_status_code):
jwt = self.oauth.create_jwt(**jwt_details)
resp = await self.oauth.get_token_response("client_credentials", _jwt=jwt)
assert resp['status_code'] == expected_status_code
assert resp['body'] == expected_response
@pytest.mark.skip("Fails in the pipeline")
async def test_manipulated_jwt_json(self):
jwt = self.oauth.create_jwt(kid='test-1')
chars = choice(ascii_letters) + choice(ascii_letters)
resp = await self.oauth.get_token_response(grant_type="client_credentials", _jwt=f"{jwt[:-2]}{chars}")
assert resp['status_code'] == 400
assert resp['body'] == {'error': 'invalid_request', 'error_description': 'Malformed JWT in client_assertion'}
async def test_invalid_jwks_resource_url(self, test_app):
test_app.set_custom_attributes(attributes={"jwks_resource_url": "http://invalid_url"})
jwt = self.oauth.create_jwt(kid='test-1', client_id=test_app.client_id)
resp = await self.oauth.get_token_response("client_credentials", _jwt=jwt)
assert resp['status_code'] == 403
assert resp['body'] == {
'error': 'public_key error',
'error_description': 'You need to register a public key to use this '
'authentication method - please contact support to configure'
}
@pytest.mark.happy_path
@pytest.mark.token_exchange
async def test_token_exchange_happy_path(self):
# Given
expected_status_code = 200
expected_expires_in = '599'
expected_token_type = 'Bearer'
expected_issued_token_type = 'urn:ietf:params:oauth:token-type:access_token'
id_token_jwt = self.oauth.create_id_token_jwt()
client_assertion_jwt = self.oauth.create_jwt(kid='test-1')
# When
resp = await self.oauth.get_token_response(
grant_type="token_exchange",
_jwt=client_assertion_jwt,
id_token_jwt=id_token_jwt
)
# Then
assert expected_status_code == resp['status_code'], resp['body']
assert 'access_token' in resp['body']
assert expected_expires_in == resp['body']['expires_in']
assert expected_token_type == resp['body']['token_type']
assert expected_issued_token_type == resp['body']['issued_token_type']
@pytest.mark.errors
@pytest.mark.token_exchange
async def test_token_exchange_invalid_client_assertion_type(self):
# Given
expected_status_code = 400
expected_error = 'invalid_request'
expected_error_description = "Missing or invalid client_assertion_type - " \
"must be 'urn:ietf:params:oauth:client-assertion-type:jwt-bearer"
# When
resp = await self.oauth.get_token_response(
grant_type="token_exchange",
data={
'client_assertion_type': 'Invalid',
'grant_type': 'urn:ietf:params:oauth:grant-type:token-exchange',
'subject_token_type': 'urn:ietf:params:oauth:token-type:id_token'
}
)
# Then
assert expected_status_code == resp['status_code']
assert expected_error == resp['body']['error']
assert expected_error_description == resp['body']['error_description']
@pytest.mark.errors
@pytest.mark.token_exchange
async def test_token_exchange_invalid_subject_token_type(self):
# Given
expected_status_code = 400
expected_error = 'invalid_request'
expected_error_description = "missing or invalid subject_token_type - " \
"must be 'urn:ietf:params:oauth:token-type:id_token'"
# When
resp = await self.oauth.get_token_response(
grant_type="token_exchange",
data={
'client_assertion_type': 'urn:ietf:params:oauth:client-assertion-type:jwt-bearer',
'subject_token_type': 'Invalid',
'grant_type': 'urn:ietf:params:oauth:grant-type:token-exchange'
}
)
# Then
assert expected_status_code == resp['status_code']
assert expected_error == resp['body']['error']
assert expected_error_description == resp['body']['error_description']
@pytest.mark.errors
@pytest.mark.token_exchange
async def test_token_exchange_claims_assertion_invalid_kid(self):
# Given
expected_status_code = 400
expected_error = 'invalid_request'
expected_error_description = "Missing 'kid' header in JWT"
client_assertion_jwt = self.oauth.create_jwt(kid=None)
# When
resp = await self.oauth.get_token_response(
grant_type="token_exchange",
data={
'client_assertion_type': 'urn:ietf:params:oauth:client-assertion-type:jwt-bearer',
'subject_token_type': 'urn:ietf:params:oauth:token-type:id_token',
'grant_type': 'urn:ietf:params:oauth:grant-type:token-exchange',
'client_assertion': client_assertion_jwt
}
)
# Then
assert expected_status_code == resp['status_code']
assert expected_error == resp['body']['error']
assert expected_error_description == resp['body']['error_description']
@pytest.mark.errors
@pytest.mark.token_exchange
async def test_token_exchange_claims_assertion_invalid_typ_header(self):
# Given
expected_status_code = 400
expected_error = 'invalid_request'
expected_error_description = "Invalid 'typ' header in JWT - must be 'JWT'"
client_assertion_jwt = self.oauth.create_jwt(kid="test-1", headers={'typ': 'invalid'})
# When
resp = await self.oauth.get_token_response(
grant_type="token_exchange",
data={
'client_assertion_type': 'urn:ietf:params:oauth:client-assertion-type:jwt-bearer',
'subject_token_type': 'urn:ietf:params:oauth:token-type:id_token',
'grant_type': 'urn:ietf:params:oauth:grant-type:token-exchange',
'client_assertion': client_assertion_jwt
}
)
# Then
assert expected_status_code == resp['status_code']
assert expected_error == resp['body']['error']
assert expected_error_description == resp['body']['error_description']
@pytest.mark.errors
@pytest.mark.token_exchange
async def test_token_exchange_claims_assertion_invalid_iss_claim(self):
# Given
expected_status_code = 400
expected_error = 'invalid_request'
expected_error_description = "Missing or non-matching iss/sub claims in JWT"
client_assertion_jwt = self.oauth.create_jwt(kid="test-1", claims={
"sub": '',
"jti": str(uuid4()),
"aud": f"{OAUTH_URL}/token",
"exp": int(time()) + 5,
})
# When
resp = await self.oauth.get_token_response(
grant_type="token_exchange",
data={
'client_assertion_type': 'urn:ietf:params:oauth:client-assertion-type:jwt-bearer',
'subject_token_type': 'urn:ietf:params:oauth:token-type:id_token',
'grant_type': 'urn:ietf:params:oauth:grant-type:token-exchange',
'client_assertion': client_assertion_jwt
}
)
# Then
assert expected_status_code == resp['status_code']
assert expected_error == resp['body']['error']
assert expected_error_description == resp['body']['error_description']
@pytest.mark.errors
@pytest.mark.token_exchange
async def test_token_exchange_claims_assertion_missing_jti_claim(self):
# Given
expected_status_code = 400
expected_error = 'invalid_request'
expected_error_description = "Missing jti claim in JWT"
client_assertion_jwt = self.oauth.create_jwt(kid="test-1", claims={
"sub": self.oauth.client_id,
"iss": self.oauth.client_id,
"jti": '',
"aud": f"{OAUTH_URL}/token",
"exp": int(time()) + 5,
})
# When
resp = await self.oauth.get_token_response(
grant_type="token_exchange",
data={
'client_assertion_type': 'urn:ietf:params:oauth:client-assertion-type:jwt-bearer',
'subject_token_type': 'urn:ietf:params:oauth:token-type:id_token',
'grant_type': 'urn:ietf:params:oauth:grant-type:token-exchange',
'client_assertion': client_assertion_jwt
}
)
# Then
assert expected_status_code == resp['status_code']
assert expected_error == resp['body']['error']
assert expected_error_description == resp['body']['error_description']
@pytest.mark.errors
@pytest.mark.token_exchange
async def test_token_exchange_claims_assertion_missing_exp_claim(self):
# Given
expected_status_code = 400
expected_error = 'invalid_request'
expected_error_description = "Missing exp claim in JWT"
client_assertion_jwt = self.oauth.create_jwt(kid="test-1", claims={
"sub": self.oauth.client_id,
"iss": self.oauth.client_id,
"jti": str(uuid4()),
"aud": f"{OAUTH_URL}/token",
})
# When
resp = await self.oauth.get_token_response(
grant_type="token_exchange",
data={
'client_assertion_type': 'urn:ietf:params:oauth:client-assertion-type:jwt-bearer',
'subject_token_type': 'urn:ietf:params:oauth:token-type:id_token',
'grant_type': 'urn:ietf:params:oauth:grant-type:token-exchange',
'client_assertion': client_assertion_jwt
}
)
# Then
assert expected_status_code == resp['status_code']
assert expected_error == resp['body']['error']
assert expected_error_description == resp['body']['error_description']
@pytest.mark.errors
@pytest.mark.token_exchange
async def test_token_exchange_claims_assertion_invalid_exp_claim(self):
# Given
expected_status_code = 400
expected_error = 'invalid_request'
expected_error_description = "Invalid exp claim in JWT - more than 5 minutes in future"
client_assertion_jwt = self.oauth.create_jwt(kid="test-1", claims={
"sub": self.oauth.client_id,
"iss": self.oauth.client_id,
"jti": str(uuid4()),
"aud": f"{OAUTH_URL}/token",
"exp": int(time()) + 50000,
})
# When
resp = await self.oauth.get_token_response(
grant_type="token_exchange",
data={
'client_assertion_type': 'urn:ietf:params:oauth:client-assertion-type:jwt-bearer',
'subject_token_type': 'urn:ietf:params:oauth:token-type:id_token',
'grant_type': 'urn:ietf:params:oauth:grant-type:token-exchange',
'client_assertion': client_assertion_jwt
}
)
# Then
assert expected_status_code == resp['status_code']
assert expected_error == resp['body']['error']
assert expected_error_description == resp['body']['error_description']
@pytest.mark.errors
@pytest.mark.token_exchange
async def test_token_exchange_claims_assertion_invalid_jti_claim(self):
# Given
expected_status_code = 400
expected_error = 'invalid_request'
expected_error_description = "Non-unique jti claim in JWT"
id_token_claims = {
'at_hash': 'tf_-lqpq36lwO7WmSBIJ6Q',
'sub': '787807429511',
'auditTrackingId': '91f694e6-3749-42fd-90b0-c3134b0d98f6-1546391',
'amr': ['N3_SMARTCARD'],
'iss': 'https://am.nhsint.ptl.nhsd-esa.net:443/'
'openam/oauth2/realms/root/realms/NHSIdentity/realms/Healthcare',
'tokenName': 'id_token',
'aud': '969567331415.apps.national',
'c_hash': 'bc7zzGkClC3MEiFQ3YhPKg',
'acr': 'AAL3_ANY',
'org.forgerock.openidconnect.ops': '-I45NjmMDdMa-aNF2sr9hC7qEGQ',
's_hash': 'LPJNul-wow4m6Dsqxbning',
'azp': '969567331415.apps.national',
'auth_time': 1610559802,
'realm': '/NHSIdentity/Healthcare',
'exp': int(time()) + 600,
'tokenType': 'JWTToken',
'iat': int(time()) - 10
}
client_assertion_jwt = self.oauth.create_jwt(kid="test-1")
id_token_jwt = self.oauth.create_id_token_jwt(kid="identity-service-tests-1", claims=id_token_claims)
# When
await self.oauth.get_token_response(
grant_type="token_exchange",
data={
'grant_type': 'urn:ietf:params:oauth:grant-type:token-exchange',
'subject_token_type': 'urn:ietf:params:oauth:token-type:id_token',
'client_assertion_type': 'urn:ietf:params:oauth:client-assertion-type:jwt-bearer',
'subject_token': id_token_jwt,
'client_assertion': client_assertion_jwt
}
)
# Second request should fail
resp = await self.oauth.get_token_response(
grant_type="token_exchange",
data={
'grant_type': 'urn:ietf:params:oauth:grant-type:token-exchange',
'subject_token_type': 'urn:ietf:params:oauth:token-type:id_token',
'client_assertion_type': 'urn:ietf:params:oauth:client-assertion-type:jwt-bearer',
'subject_token': id_token_jwt,
'client_assertion': client_assertion_jwt
}
)
# Then
assert expected_status_code == resp['status_code']
assert expected_error == resp['body']['error']
assert expected_error_description == resp['body']['error_description']
@pytest.mark.errors
@pytest.mark.token_exchange
async def test_token_exchange_subject_token_missing_iss_or_sub_claim(self):
# Given
expected_status_code = 400
expected_error = 'invalid_request'
expected_error_description = "Missing or non-matching iss/sub claims in JWT"
id_token_claims = {
'at_hash': 'tf_-lqpq36lwO7WmSBIJ6Q',
'sub': '787807429511',
'auditTrackingId': '91f694e6-3749-42fd-90b0-c3134b0d98f6-1546391',
'amr': ['N3_SMARTCARD'],
'tokenName': 'id_token',
'aud': '969567331415.apps.national',
'c_hash': 'bc7zzGkClC3MEiFQ3YhPKg',
'acr': 'AAL3_ANY',
'org.forgerock.openidconnect.ops': '-I45NjmMDdMa-aNF2sr9hC7qEGQ',
's_hash': 'LPJNul-wow4m6Dsqxbning',
'azp': '969567331415.apps.national',
'auth_time': 1610559802,
'realm': '/NHSIdentity/Healthcare',
'exp': int(time()) + 600,
'tokenType': 'JWTToken',
'iat': int(time()) - 10
}
client_assertion_jwt = self.oauth.create_jwt(kid="test-1")
id_token_jwt = self.oauth.create_id_token_jwt(kid="identity-service-tests-1", claims=id_token_claims)
# When
resp = await self.oauth.get_token_response(
grant_type="token_exchange",
data={
'grant_type': 'urn:ietf:params:oauth:grant-type:token-exchange',
'subject_token_type': 'urn:ietf:params:oauth:token-type:id_token',
'client_assertion_type': 'urn:ietf:params:oauth:client-assertion-type:jwt-bearer',
'subject_token': id_token_jwt,
'client_assertion': client_assertion_jwt
}
)
# Then
assert expected_status_code == resp['status_code']
assert expected_error == resp['body']['error']
assert expected_error_description == resp['body']['error_description']
@pytest.mark.errors
@pytest.mark.token_exchange
async def test_token_exchange_subject_token_missing_aud_claim(self):
# Given
expected_status_code = 400
expected_error = 'invalid_request'
expected_error_description = "Missing aud claim in JWT"
id_token_claims = {
'at_hash': 'tf_-lqpq36lwO7WmSBIJ6Q',
'sub': '787807429511',
'auditTrackingId': '91f694e6-3749-42fd-90b0-c3134b0d98f6-1546391',
'iss': 'https://am.nhsint.ptl.nhsd-esa.net:443'
'/openam/oauth2/realms/root/realms/NHSIdentity/realms/Healthcare',
'amr': ['N3_SMARTCARD'],
'tokenName': 'id_token',
'c_hash': 'bc7zzGkClC3MEiFQ3YhPKg',
'acr': 'AAL3_ANY',
'org.forgerock.openidconnect.ops': '-I45NjmMDdMa-aNF2sr9hC7qEGQ',
's_hash': 'LPJNul-wow4m6Dsqxbning',
'azp': '969567331415.apps.national',
'auth_time': 1610559802,
'realm': '/NHSIdentity/Healthcare',
'exp': int(time()) + 600,
'tokenType': 'JWTToken',
'iat': int(time()) - 10
}
client_assertion_jwt = self.oauth.create_jwt(kid="test-1")
id_token_jwt = self.oauth.create_id_token_jwt(kid="identity-service-tests-1", claims=id_token_claims)
# When
resp = await self.oauth.get_token_response(
grant_type="token_exchange",
data={
'grant_type': 'urn:ietf:params:oauth:grant-type:token-exchange',
'subject_token_type': 'urn:ietf:params:oauth:token-type:id_token',
'client_assertion_type': 'urn:ietf:params:oauth:client-assertion-type:jwt-bearer',
'subject_token': id_token_jwt,
'client_assertion': client_assertion_jwt
}
)
# Then
assert expected_status_code == resp['status_code']
assert expected_error == resp['body']['error']
assert expected_error_description == resp['body']['error_description']
@pytest.mark.errors
@pytest.mark.token_exchange
async def test_token_exchange_subject_token_missing_exp_claim(self):
# Given
expected_status_code = 400
expected_error = 'invalid_request'
expected_error_description = "Missing exp claim in JWT"
id_token_claims = {
'at_hash': 'tf_-lqpq36lwO7WmSBIJ6Q',
'sub': '787807429511',
'auditTrackingId': '91f694e6-3749-42fd-90b0-c3134b0d98f6-1546391',
'amr': ['N3_SMARTCARD'],
'iss': 'https://am.nhsint.ptl.nhsd-esa.net:443'
'/openam/oauth2/realms/root/realms/NHSIdentity/realms/Healthcare',
'tokenName': 'id_token',
'aud': '969567331415.apps.national',
'c_hash': 'bc7zzGkClC3MEiFQ3YhPKg',
'acr': 'AAL3_ANY',
'org.forgerock.openidconnect.ops': '-I45NjmMDdMa-aNF2sr9hC7qEGQ',
's_hash': 'LPJNul-wow4m6Dsqxbning',
'azp': '969567331415.apps.national',
'auth_time': 1610559802,
'realm': '/NHSIdentity/Healthcare',
#'exp': int(time()) + 600,
'tokenType': 'JWTToken',
'iat': int(time()) - 10
}
client_assertion_jwt = self.oauth.create_jwt(kid="test-1")
id_token_jwt = self.oauth.create_id_token_jwt(kid="identity-service-tests-1", claims=id_token_claims)
# When
resp = await self.oauth.get_token_response(
grant_type="token_exchange",
data={
'grant_type': 'urn:ietf:params:oauth:grant-type:token-exchange',
'subject_token_type': 'urn:ietf:params:oauth:token-type:id_token',
'client_assertion_type': 'urn:ietf:params:oauth:client-assertion-type:jwt-bearer',
'subject_token': id_token_jwt,
'client_assertion': client_assertion_jwt
}
)
# Then
assert expected_status_code == resp['status_code']
assert expected_error == resp['body']['error']
assert expected_error_description == resp['body']['error_description']
@pytest.mark.errors
@pytest.mark.token_exchange
async def test_nhs_login_happy_path(self):
# Given
expected_status_code = 200
expected_expires_in = '599'
expected_token_type = 'Bearer'
expected_issued_token_type = 'urn:ietf:params:oauth:token-type:access_token'
id_token_claims = {
'aud': 'tf_-APIM-1',
'id_status': 'verified',
'token_use': 'id',
'auth_time': 1616600683,
'iss': 'https://auth.sandpit.signin.nhs.uk',
'vot': 'P9.Cp.Cd',
'exp': int(time()) + 600,
'iat': int(time()) - 10,
'vtm' : 'https://auth.sandpit.signin.nhs.uk/trustmark/auth.sandpit.signin.nhs.uk',
'jti': 'b68ddb28-e440-443d-8725-dfe0da330118'
}
id_token_headers = {
"sub": "49f470a1-cc52-49b7-beba-0f9cec937c46",
"aud": "APIM-1",
"kid": "nhs-login",
"iss": "https://auth.sandpit.signin.nhs.uk",
"typ": "JWT",
"exp": 1616604574,
"iat": 1616600974,
"alg": "RS512",
"jti": "b68ddb28-e440-443d-8725-dfe0da330118"
}
with open(ID_TOKEN_NHS_LOGIN_PRIVATE_KEY_ABSOLUTE_PATH, "r") as f:
contents = f.read()
client_assertion_jwt = self.oauth.create_jwt(kid="test-1")
id_token_jwt = self.oauth.create_id_token_jwt(algorithm='RS512',claims=id_token_claims, headers = id_token_headers, signing_key=contents)
# When
resp = await self.oauth.get_token_response(
grant_type="token_exchange",
data={
'grant_type': 'urn:ietf:params:oauth:grant-type:token-exchange',
'subject_token_type': 'urn:ietf:params:oauth:token-type:id_token',
'client_assertion_type': 'urn:ietf:params:oauth:client-assertion-type:jwt-bearer',
'subject_token': id_token_jwt,
'client_assertion': client_assertion_jwt
}
)
# # Then
assert expected_status_code == resp['status_code'], resp['body']
assert 'access_token' in resp['body']
assert expected_expires_in == resp['body']['expires_in']
assert expected_token_type == resp['body']['token_type']
assert expected_issued_token_type == resp['body']['issued_token_type']
@pytest.mark.errors
@pytest.mark.token_exchange
async def test_token_exchange_subject_token_nhs_login_missing_iss_claim(self):
# Given
expected_status_code = 400
expected_error = 'invalid_request'
expected_error_description = "Missing or non-matching iss/sub claims in JWT"
id_token_claims = {
'aud': 'tf_-APIM-1',
'id_status': 'verified',
'token_use': 'id',
'auth_time': 1616600683,
# 'iss': 'https://auth.sandpit.signin.nhs.uk',
'vot': 'P9.Cp.Cd',
'exp': int(time()) + 600,
'iat': int(time()) - 10,
'vtm' : 'https://auth.sandpit.signin.nhs.uk/trustmark/auth.sandpit.signin.nhs.uk',
'jti': 'b68ddb28-e440-443d-8725-dfe0da330118'
}
id_token_headers = {
"sub": "49f470a1-cc52-49b7-beba-0f9cec937c46",
"aud": "APIM-1",
"kid": "nhs-login",
"iss": "https://auth.sandpit.signin.nhs.uk",
"typ": "JWT",
"exp": 1616604574,
"iat": 1616600974,
"alg": "RS512",
"jti": "b68ddb28-e440-443d-8725-dfe0da330118"
}
with open(ID_TOKEN_NHS_LOGIN_PRIVATE_KEY_ABSOLUTE_PATH, "r") as f:
contents = f.read()
client_assertion_jwt = self.oauth.create_jwt(kid="test-1")
id_token_jwt = self.oauth.create_id_token_jwt(algorithm='RS512',claims=id_token_claims, headers = id_token_headers, signing_key=contents)
# When
resp = await self.oauth.get_token_response(
grant_type="token_exchange",
data={
'grant_type': 'urn:ietf:params:oauth:grant-type:token-exchange',
'subject_token_type': 'urn:ietf:params:oauth:token-type:id_token',
'client_assertion_type': 'urn:ietf:params:oauth:client-assertion-type:jwt-bearer',
'subject_token': id_token_jwt,
'client_assertion': client_assertion_jwt
}
)
# Then
assert expected_status_code == resp['status_code']
assert expected_error == resp['body']['error']
assert expected_error_description == resp['body']['error_description']
@pytest.mark.errors
@pytest.mark.token_exchange
async def test_token_exchange_subject_token_nhs_login_missing_aud_claim(self):
# Given
expected_status_code = 400
expected_error = 'invalid_request'
expected_error_description = "Missing aud claim in JWT"
id_token_claims = {
# 'aud': 'tf_-APIM-1',
'id_status': 'verified',
'token_use': 'id',
'auth_time': 1616600683,
'iss': 'https://auth.sandpit.signin.nhs.uk',
'vot': 'P9.Cp.Cd',
'exp': int(time()) + 600,
'iat': int(time()) - 10,
'vtm' : 'https://auth.sandpit.signin.nhs.uk/trustmark/auth.sandpit.signin.nhs.uk',
'jti': 'b68ddb28-e440-443d-8725-dfe0da330118'
}
id_token_headers = {
"sub": "49f470a1-cc52-49b7-beba-0f9cec937c46",
"aud": "APIM-1",
"kid": "nhs-login",
"iss": "https://auth.sandpit.signin.nhs.uk",
"typ": "JWT",
"exp": 1616604574,
"iat": 1616600974,
"alg": "RS512",
"jti": "b68ddb28-e440-443d-8725-dfe0da330118"
}
with open(ID_TOKEN_NHS_LOGIN_PRIVATE_KEY_ABSOLUTE_PATH, "r") as f:
contents = f.read()
client_assertion_jwt = self.oauth.create_jwt(kid="test-1")
id_token_jwt = self.oauth.create_id_token_jwt(algorithm='RS512',claims=id_token_claims, headers = id_token_headers, signing_key=contents)
# When
resp = await self.oauth.get_token_response(
grant_type="token_exchange",
data={
'grant_type': 'urn:ietf:params:oauth:grant-type:token-exchange',
'subject_token_type': 'urn:ietf:params:oauth:token-type:id_token',
'client_assertion_type': 'urn:ietf:params:oauth:client-assertion-type:jwt-bearer',
'subject_token': id_token_jwt,
'client_assertion': client_assertion_jwt
}
)
# Then
assert expected_status_code == resp['status_code']
assert expected_error == resp['body']['error']
assert expected_error_description == resp['body']['error_description']
@pytest.mark.errors
@pytest.mark.token_exchange
async def test_token_exchange_subject_token_nhs_login_missing_exp_claim(self):
# Given
expected_status_code = 400
expected_error = 'invalid_request'
expected_error_description = "Missing exp claim in JWT"
id_token_claims = {
'aud': 'tf_-APIM-1',
'id_status': 'verified',
'token_use': 'id',
'auth_time': 1616600683,
'iss': 'https://auth.sandpit.signin.nhs.uk',
'vot': 'P9.Cp.Cd',
# 'exp': int(time()) + 600,
'iat': int(time()) - 10,
'vtm' : 'https://auth.sandpit.signin.nhs.uk/trustmark/auth.sandpit.signin.nhs.uk',
'jti': 'b68ddb28-e440-443d-8725-dfe0da330118'
}
id_token_headers = {
"sub": "49f470a1-cc52-49b7-beba-0f9cec937c46",
"aud": "APIM-1",
"kid": "nhs-login",
"iss": "https://auth.sandpit.signin.nhs.uk",
"typ": "JWT",
"exp": 1616604574,
"iat": 1616600974,
"alg": "RS512",
"jti": "b68ddb28-e440-443d-8725-dfe0da330118"
}
with open(ID_TOKEN_NHS_LOGIN_PRIVATE_KEY_ABSOLUTE_PATH, "r") as f:
contents = f.read()
client_assertion_jwt = self.oauth.create_jwt(kid="test-1")
id_token_jwt = self.oauth.create_id_token_jwt(algorithm='RS512',claims=id_token_claims, headers = id_token_headers, signing_key=contents)
# When
resp = await self.oauth.get_token_response(
grant_type="token_exchange",
data={
'grant_type': 'urn:ietf:params:oauth:grant-type:token-exchange',
'subject_token_type': 'urn:ietf:params:oauth:token-type:id_token',
'client_assertion_type': 'urn:ietf:params:oauth:client-assertion-type:jwt-bearer',
'subject_token': id_token_jwt,
'client_assertion': client_assertion_jwt
}
)
# Then
assert expected_status_code == resp['status_code']
assert expected_error == resp['body']['error']
assert expected_error_description == resp['body']['error_description']
@pytest.mark.errors
@pytest.mark.token_exchange
async def test_token_exchange_subject_token_nhs_login_invalid_iss_claim(self):
# Given
expected_status_code = 400
expected_error = 'invalid_request'
expected_error_description = "Missing or non-matching iss/sub claims in JWT"
id_token_claims = {
'aud': 'tf_-APIM-1',
'id_status': 'verified',
'token_use': 'id',
'auth_time': 1616600683,
'iss': 'invalidIss',
'vot': 'P9.Cp.Cd',
'exp': int(time()) + 600,
'iat': int(time()) - 10,
'vtm' : 'https://auth.sandpit.signin.nhs.uk/trustmark/auth.sandpit.signin.nhs.uk',
'jti': 'b68ddb28-e440-443d-8725-dfe0da330118'
}
id_token_headers = {
"sub": "49f470a1-cc52-49b7-beba-0f9cec937c46",
"aud": "APIM-1",
"kid": "nhs-login",
"iss": "InvalidIss",
"typ": "JWT",
"exp": 1616604574,
"iat": 1616600974,
"alg": "RS512",
"jti": "b68ddb28-e440-443d-8725-dfe0da330118"
}
with open(ID_TOKEN_NHS_LOGIN_PRIVATE_KEY_ABSOLUTE_PATH, "r") as f:
contents = f.read()
client_assertion_jwt = self.oauth.create_jwt(kid="test-1")
id_token_jwt = self.oauth.create_id_token_jwt(algorithm='RS512',claims=id_token_claims, headers = id_token_headers, signing_key=contents)
# When
resp = await self.oauth.get_token_response(
grant_type="token_exchange",
data={
'grant_type': 'urn:ietf:params:oauth:grant-type:token-exchange',
'subject_token_type': 'urn:ietf:params:oauth:token-type:id_token',
'client_assertion_type': 'urn:ietf:params:oauth:client-assertion-type:jwt-bearer',
'subject_token': id_token_jwt,
'client_assertion': client_assertion_jwt
}
)
# Then
assert expected_status_code == resp['status_code']
assert expected_error == resp['body']['error']
        assert expected_error_description == resp['body']['error_description'] | python | 18 | 0.529836 | 145 | 38.143234 | 1138 | A test suit to verify all the happy path oauth endpoints | class |
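# The NHS Login tests above rebuild nearly identical id_token claims by hand and
# only vary one field per scenario. A hypothetical helper along the lines of the
# sketch below could remove that duplication; `nhs_login_id_token_claims`, its
# parameters and the baseline values are assumptions drawn from the tests, not
# part of the original suite.
from time import time
def nhs_login_id_token_claims(exclude=(), **overrides):
    # Baseline claims copied from the tests; drop or override fields per scenario.
    claims = {
        'aud': 'tf_-APIM-1',
        'id_status': 'verified',
        'token_use': 'id',
        'auth_time': 1616600683,
        'iss': 'https://auth.sandpit.signin.nhs.uk',
        'vot': 'P9.Cp.Cd',
        'exp': int(time()) + 600,
        'iat': int(time()) - 10,
        'vtm': 'https://auth.sandpit.signin.nhs.uk/trustmark/auth.sandpit.signin.nhs.uk',
        'jti': 'b68ddb28-e440-443d-8725-dfe0da330118'
    }
    claims.update(overrides)
    for claim in exclude:
        claims.pop(claim, None)
    return claims
# Example use inside the tests:
#   id_token_claims = nhs_login_id_token_claims(exclude=('exp',))   # missing exp claim
#   id_token_claims = nhs_login_id_token_claims(iss='invalidIss')   # invalid iss claim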
class ProxyServer:
"""Main proxy server which creates a TLS socket and listens for clients.
If clients connect the server will start a TunnelServer"""
def __init__(
self,
host: Union[str, List[str]],
port: int,
cert: str,
key: str,
ca: str = None,
http_domain: str = None,
**kwargs,
):
self.kwargs = kwargs
self.host, self.port = host, port
self.max_tunnels = config["max-tunnels"]
self.http_host, self.http_port = config["http-listen"]
self.tunnels = {}
self.sc = utils.generate_ssl_context(cert=cert, key=key, ca=ca, server=True)
if isinstance(http_domain, str):
self.http_domain = http_domain
self.http_domain_regex = re.compile(
rb"^(.*)\.%s$" % http_domain.replace(".", r"\.").encode()
)
else:
self.http_domain = self.http_domain_regex = False
async def _accept(self, reader: StreamReader, writer: StreamWriter) -> None:
""" Accept new tunnels and start to listen for clients """
# Limit the number of tunnels
if 0 < self.max_tunnels <= len(self.tunnels):
return
        # Create the tunnel object and generate a unique token
tunnel = TunnelServer(reader, writer, domain=self.http_domain, **self.kwargs)
self.tunnels[tunnel.uuid] = tunnel
try:
await tunnel.loop()
finally:
self.tunnels.pop(tunnel.uuid)
async def _request(self, reader: StreamReader, writer: StreamWriter) -> None:
""" Handle http requests and try to proxy them to the specific tunnel """
buf = await reader.readline()
status = buf.strip()
match = HTTPRequestStatus.match(status)
if not match:
await self.close(reader, writer)
return
version = match.groups()[2]
# Read the HTTP headers
headers = {}
while not reader.at_eof():
line = await reader.readline()
buf += line
stripped = line.strip()
if not stripped:
break
if b":" in stripped:
header, value = (x.strip() for x in stripped.split(b":", 1))
headers[header] = value
        # Extract the host from the headers and try to match it against the proxy domain
host = headers.get(b"X-Forwarded-Host", headers.get(b"Host", b""))
match = self.http_domain_regex.match(host)
if not match or len(match.groups()) < 1:
writer.write(b"%s 404 Not Found\r\n\r\n" % version)
await writer.drain()
await self.close(reader, writer)
return
# Find the right tunnel for the host
tun_uuid = match.groups()[0].decode()
if tun_uuid not in self.tunnels:
writer.write(b"%s 404 Not Found\r\n\r\n" % version)
await writer.drain()
await self.close(reader, writer)
return
# Get the tunnel and accept the client if set to HTTP protocol
tunnel = self.tunnels[tun_uuid]
if tunnel.protocol == base.ProtocolType.HTTP:
await tunnel._client_accept(reader, writer, buf)
else:
writer.write(b"%s 404 Not Found\r\n\r\n" % version)
await writer.drain()
await self.close(reader, writer)
async def http_loop(self) -> None:
""" Main server loop for the http socket """
host = self.http_host
for h in host if isinstance(host, list) else [host]:
_logger.info("Serving on %s:%s [HTTP]", h, self.http_port)
self.http_proxy = await asyncio.start_server(
self._request,
self.http_host,
self.http_port,
)
async with self.http_proxy:
await self.http_proxy.serve_forever()
async def loop(self) -> None:
""" Main server loop to wait for tunnels to open """
if self.http_domain_regex:
asyncio.create_task(self.http_loop())
self.server = await asyncio.start_server(
self._accept,
self.host,
self.port,
ssl=self.sc,
)
for host in self.host if isinstance(self.host, list) else [self.host]:
_logger.info("Serving on %s:%s", host, self.port)
async with self.server:
await self.server.serve_forever()
def start(self) -> None:
""" Start the server and event loop """
_logger.info("Starting server...")
asyncio.run(self.loop())
async def stop(self) -> None:
""" Stop the server and event loop """
for tunnel in self.tunnels.values():
await tunnel.stop()
self.server.close()
await self.server.wait_closed()
async def close(self, reader: StreamReader, writer: StreamWriter) -> None:
""" Close a StreamReader and StreamWriter """
reader.feed_eof()
writer.close()
await writer.wait_closed() | python | 17 | 0.561546 | 85 | 33.561644 | 146 | Main proxy server which creates a TLS socket and listens for clients.
If clients connect the server will start a TunnelServer | class |
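# A minimal usage sketch for the ProxyServer above (not part of the original
# module): the certificate/key paths and the domain are placeholder assumptions,
# and only constructor arguments visible in the class are used.
if __name__ == "__main__":
    server = ProxyServer(
        host="0.0.0.0",             # a single address or a list of addresses
        port=8443,                  # TLS listener for tunnel clients
        cert="server.crt",          # assumed TLS certificate / private key paths
        key="server.key",
        ca=None,                    # optional CA used to verify client certificates
        http_domain="example.com",  # enables the HTTP front end; tunnels are reachable at <uuid>.example.com
    )
    server.start()                  # blocks and runs the asyncio event loop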
class Patch_loader :
"""
Class Patch_loader
======================
Defines loading functions for the patch database
"""
    def __init__(self, directory, size=64, seed=42,
                 only_green=True, proportion=1.0) :
        # data init
        self.dir = directory # directory with the train / test / validation subdirectories
        self.size = size # size of the sub image that should be cropped
        self.proportion = proportion # fraction of the images of each class to load (assumed default: 1.0 = all)
        self.nb_channels = 3 # number of channels returned (set to 1 below if only the green channel is kept)
if(only_green == True) :
self.nb_channels = 1
self.file_train = [] # list of the train images : tuple (image name / class)
self.file_test = [] # list of the test images : tuple (image name / class)
self.file_validation = [] # list of the validation images : tuple (image name / class)
self.image_class = ['original', 'modified'] # list of the class (label) used in the process
self.nb_class = len(self.image_class)
self.train_iterator = 0 # iterator over the train images
self.test_iterator = 0 # iterator over the test images
self.validation_iterator = 0 # iterator over the validation images
self.load_images(seed) # load the data base
def extract_channel(self, rgb_image, channel=1) :
if channel > 2 :
channel = 2
return rgb_image[:,:,channel]
def get_immediate_subdirectories(self,a_dir) :
# return the list of sub directories of a directory
return [name for name in os.listdir(a_dir)
if os.path.isdir(os.path.join(a_dir, name))]
def load_images_in_dir(self, dir_name, image_class) :
# file extension accepted as image data
proportion = self.proportion
valid_image_extension = [".jpg", ".JPG", ".jpeg"]
file_list = []
for c in image_class :
nb_image_per_class = 0
file_list_by_class = []
for filename in os.listdir(dir_name+'/'+c):
# check if the file is an image
extension = os.path.splitext(filename)[1]
if extension.lower() in valid_image_extension:
file_list_by_class.append(filename)
for i in range(int(len(file_list_by_class)*proportion)):
file_list.append((file_list_by_class[i],c))
nb_image_per_class += 1
print(' ',c,nb_image_per_class,'images loaded')
return file_list
def load_images(self, seed) :
        # check if train / test / validation directories exist
        train_dir_name = self.dir + '/train'
        if not os.path.exists(train_dir_name):
            print("error: train directory does not exist")
            sys.exit(1)
        validation_dir_name = self.dir + '/validation'
        if not os.path.exists(validation_dir_name):
            print("error: validation directory does not exist")
            sys.exit(1)
        test_dir_name = self.dir + '/test'
        if not os.path.exists(test_dir_name):
            print("error: test directory does not exist")
            sys.exit(1)
# count number of classes
# self.image_class = self.get_immediate_subdirectories(train_dir_name)
print(' number of classes :', self.nb_class, ' ', self.image_class)
# load image file name and class
print("\n train data")
self.file_train = self.load_images_in_dir(train_dir_name,self.image_class)
print("\n test data")
self.file_test = self.load_images_in_dir(test_dir_name,self.image_class)
print("\n validation data")
self.file_validation = self.load_images_in_dir(validation_dir_name,self.image_class)
# shuffle the lists
print("\n shuffle lists ...")
random.seed(seed)
random.shuffle(self.file_train)
random.shuffle(self.file_test)
random.shuffle(self.file_validation)
#print(self.file_train)
#print("\n loading done.")
def get_next_image(self, directory, verbose = False) :
        if directory not in ('train', 'test', 'validation'):
            print("error: directory must be 'train', 'test' or 'validation'")
            sys.exit(1)
# load next image (size should be big enough)
image = []
# pop file name and class
if directory == 'train':
data = self.file_train[self.train_iterator]
self.train_iterator += 1
if self.train_iterator >= len(self.file_train) :
self.train_iterator = 0
if directory == 'test':
data = self.file_test[self.test_iterator]
self.test_iterator += 1
if self.test_iterator >= len(self.file_test) :
self.test_iterator = 0
if directory == 'validation':
data = self.file_validation[self.validation_iterator]
self.validation_iterator += 1
if self.validation_iterator >= len(self.file_validation) :
self.validation_iterator = 0
# load image
file_name = self.dir + '/' + directory + '/' + data[1] + '/' + data[0]
image = Image.open(file_name)
if(verbose) :
print(" ", file_name)
print( ' index :', self.train_iterator -1)
print( ' width :', image.size[0] )
print( ' height :', image.size[1] )
print( ' mode :', image.mode )
print( ' format :', image.format )
image = np.asarray(image)
if( self.nb_channels == 1 and len(image.shape) > 2 ) :
image = self.extract_channel(image,1)
# convert to float image
image = image.astype(np.float32) / 255.
#image = image.reshape(1, self.size, self.size, 3)
image = image.reshape(self.size, self.size, self.nb_channels)
# build class label
label = np.zeros(len(self.image_class))
pos = self.image_class.index(data[1])
label[pos] = 1.0
# return image and label
return (image, label)
def get_intrablock(self, image):
        if self.size % 8 != 0:
            print('Incorrect size... not divisible by block size (8)')
            return
blocks = []
for k in range(self.nb_channels):
for i in range(int(self.size/8)):
for j in range(int(self.size/8)):
blocks.append(image[8*i:8*(i+1), 8*j:8*(j+1), k])
blocks = np.array(blocks)
return(blocks)
def get_interblock(self, image):
        if self.size % 8 != 0:
            print('Incorrect size... not divisible by block size (8)')
            return
blocks = []
for k in range(self.nb_channels):
for i in range(int(self.size/8) - 1):
for j in range(int(self.size/8) - 1):
blocks.append(image[4 + 8*i: 4 + 8*(i+1), 4 + 8*j: 4 + 8*(j+1), k])
blocks = np.array(blocks)
return(blocks)
def get_next_train_batch(self, batch_size = 64):
nb_intra = int(self.size**2/(8**2)*self.nb_channels)
nb_inter = int((self.size**2/(8**2) - 2*self.size/8 + 1)*self.nb_channels)
        # each sample is the stack of 8x8 blocks produced by get_intrablock / get_interblock
        batch_image_intra = np.empty([batch_size, nb_intra, 8, 8])
        batch_image_inter = np.empty([batch_size, nb_inter, 8, 8])
        batch_label = np.empty([batch_size, self.nb_class])
        for i in range(batch_size):
            image, label = self.get_next_image(directory = 'train')
            batch_label[i] = label
            batch_image_intra[i] = self.get_intrablock(image)
            batch_image_inter[i] = self.get_interblock(image)
return(batch_image_intra, batch_image_inter, batch_label)
def get_next_test_batch(self, batch_size = 64):
nb_intra = int(self.size**2/(8**2)*self.nb_channels)
nb_inter = int((self.size**2/(8**2) - 2*self.size/8 + 1)*self.nb_channels)
        # each sample is the stack of 8x8 blocks produced by get_intrablock / get_interblock
        batch_image_intra = np.empty([batch_size, nb_intra, 8, 8])
        batch_image_inter = np.empty([batch_size, nb_inter, 8, 8])
        batch_label = np.empty([batch_size, self.nb_class])
        for i in range(batch_size):
            image, label = self.get_next_image(directory = 'test')
            batch_label[i] = label
            batch_image_intra[i] = self.get_intrablock(image)
            batch_image_inter[i] = self.get_interblock(image)
return(batch_image_intra, batch_image_inter, batch_label)
def get_next_validation_batch(self, batch_size = 64):
nb_intra = int(self.size**2/(8**2)*self.nb_channels)
nb_inter = int((self.size**2/(8**2) - 2*self.size/8 + 1)*self.nb_channels)
        # each sample is the stack of 8x8 blocks produced by get_intrablock / get_interblock
        batch_image_intra = np.empty([batch_size, nb_intra, 8, 8])
        batch_image_inter = np.empty([batch_size, nb_inter, 8, 8])
        batch_label = np.empty([batch_size, self.nb_class])
        for i in range(batch_size):
            image, label = self.get_next_image(directory = 'validation')
            batch_label[i] = label
            batch_image_intra[i] = self.get_intrablock(image)
            batch_image_inter[i] = self.get_interblock(image)
return(batch_image_intra, batch_image_inter, batch_label) | python | 22 | 0.605224 | 107 | 36.304721 | 233 |
Class Patch_loader
======================
Defines loading functions for the patch database
| class |
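# Illustrative only: a short sketch of how the Patch_loader above might be driven.
# The database path is a placeholder; the directory is expected to contain train/,
# test/ and validation/ subdirectories, each with one folder per class
# ('original' and 'modified').
loader = Patch_loader('/path/to/patch_database', size=64, seed=42, only_green=True)
image, label = loader.get_next_image(directory='train', verbose=True)   # one labelled patch
intra, inter, labels = loader.get_next_train_batch(batch_size=64)       # stacks of 8x8 intra-/inter-blocks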
class PrintMode:
"""handles output format.
This will print ints.
Inherit from this class to override the dot method.
"""
change_message = "Now printing integers (base 10)\n"
def __init__(self, file=sys.stdout):
self.file = file
def dot(self, value):
"""print the given value as integer in base 10
:value: (int) the value to print
"""
self.file.write(str(value) + ";")
self.file.flush() | python | 11 | 0.589247 | 56 | 24.888889 | 18 | handles output format.
This will print ints.
Inherit from this class to override the dot method.
| class |
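# The PrintMode docstring above says to inherit and override dot() to change the
# output format. A minimal sketch of such a subclass (HexPrintMode is an assumed
# name, not part of the original module):
class HexPrintMode(PrintMode):
    """Prints values in base 16 instead of base 10."""
    change_message = "Now printing integers (base 16)\n"
    def dot(self, value):
        """print the given value as an integer in base 16
        :value: (int) the value to print
        """
        self.file.write(hex(value) + ";")
        self.file.flush()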