id (int64, 0-300k) | label (string, lengths 1-74, ⌀ = null) | text (string, lengths 4k-8k) |
---|---|---|
299,700 | test time indices | import torch
from diffusers import DDIMInverseScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMInverseSchedulerTest(SchedulerCommonTest):
scheduler_classes = (DDIMInverseScheduler,)
forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))
def get_scheduler_config(self, **kwargs):
config = {
"num_train_timesteps": 1000,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**kwargs)
return config
def full_loop(self, **config):
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(**config)
scheduler = scheduler_class(**scheduler_config)
num_inference_steps, eta = 10, 0.0
model = self.dummy_model()
sample = self.dummy_sample_deter
scheduler.set_timesteps(num_inference_steps)
for t in scheduler.timesteps:
residual = model(sample, t)
sample = scheduler.step(residual, t, sample, eta).prev_sample
return sample
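# Illustrative note (not in the original test): full_loop() drives the inverse scheduler
# across all of its timesteps with a dummy model, so the numeric assertions in the
# test_full_loop_* methods below regression-test the whole inversion trajectory at once.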
def test_timesteps(self):
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=timesteps)
def test_steps_offset(self):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=steps_offset)
scheduler_class = self.scheduler_classes[0]
scheduler_config = self.get_scheduler_config(steps_offset=1)
scheduler = scheduler_class(**scheduler_config)
scheduler.set_timesteps(5)
assert torch.equal(scheduler.timesteps, torch.LongTensor([-199, 1, 201, 401, 601]))
def test_betas(self):
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
def test_schedules(self):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=schedule)
def test_prediction_type(self):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=prediction_type)
def test_clip_sample(self):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=clip_sample)
def test_timestep_spacing(self):
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=timestep_spacing)
def test_rescale_betas_zero_snr(self):
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)
def test_thresholding(self):
self.check_over_configs(thresholding=False)
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=True,
prediction_type=prediction_type,
sample_max_value=threshold,
)
def METHOD_NAME(self):
for t in [1, 10, 49]:
self.check_over_forward(time_step=t)
def test_inference_steps(self):
for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)
def test_add_noise_device(self):
pass
def test_full_loop_no_noise(self):
sample = self.full_loop()
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 509.1079) < 1e-2
assert abs(result_mean.item() - 0.6629) < 1e-3
def test_full_loop_with_v_prediction(self):
sample = self.full_loop(prediction_type="v_prediction")
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 1029.129) < 1e-2
assert abs(result_mean.item() - 1.3400) < 1e-3
def test_full_loop_with_set_alpha_to_one(self):
# We specify different beta, so that the first alpha is 0.99
sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 259.8116) < 1e-2
assert abs(result_mean.item() - 0.3383) < 1e-3
def test_full_loop_with_no_set_alpha_to_one(self):
# We specify different beta, so that the first alpha is 0.99
sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
result_sum = torch.sum(torch.abs(sample))
result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 239.055) < 1e-2
assert abs(result_mean.item() - 0.3113) < 1e-3 |
299,701 | encode sku fields | import argparse
import random
from datetime import datetime
import dgl
import networkx as nx
import numpy as np
import torch as th
def init_args():
# TODO: change args
argparser = argparse.ArgumentParser()
argparser.add_argument("--session_interval_sec", type=int, default=1800)
argparser.add_argument(
"--action_data", type=str, default="data/action_head.csv"
)
argparser.add_argument(
"--item_info_data", type=str, default="data/jdata_product.csv"
)
argparser.add_argument("--walk_length", type=int, default=10)
argparser.add_argument("--num_walks", type=int, default=5)
argparser.add_argument("--batch_size", type=int, default=64)
argparser.add_argument("--dim", type=int, default=16)
argparser.add_argument("--epochs", type=int, default=30)
argparser.add_argument("--window_size", type=int, default=2)
argparser.add_argument("--num_negative", type=int, default=5)
argparser.add_argument("--lr", type=float, default=0.001)
argparser.add_argument("--log_every", type=int, default=100)
return argparser.parse_args()
def construct_graph(datapath, session_interval_gap_sec, valid_sku_raw_ids):
user_clicks, sku_encoder, sku_decoder = parse_actions(
datapath, valid_sku_raw_ids
)
# {src,dst: weight}
graph = {}
for user_id, action_list in user_clicks.items():
# sort by action time
_action_list = sorted(action_list, key=lambda x: x[1])
last_action_time = datetime.strptime(
_action_list[0][1], "%Y-%m-%d %H:%M:%S"
)
session = [_action_list[0][0]]
# cut sessions and add to graph
for sku_id, action_time in _action_list[1:]:
action_time = datetime.strptime(action_time, "%Y-%m-%d %H:%M:%S")
gap = action_time - last_action_time
if gap.seconds < session_interval_gap_sec:
session.append(sku_id)
else:
# here we have a new session
# add prev session to graph
add_session(session, graph)
# create a new session
session = [sku_id]
# add last session
add_session(session, graph)
g = convert_to_dgl_graph(graph)
return g, sku_encoder, sku_decoder
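# Illustrative sketch (not in the original source): with session_interval_gap_sec=1800,
# a user's clicks
#   [(3, "2018-02-01 10:00:00"), (7, "2018-02-01 10:10:00"), (9, "2018-02-01 12:00:00")]
# are cut into the sessions [3, 7] and [9]; only the first contributes an edge ("3,7")
# to the weighted graph, since single-item sessions add no edges.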
def convert_to_dgl_graph(graph):
# directed graph
g = nx.DiGraph()
for edge, weight in graph.items():
nodes = edge.split(",")
src, dst = int(nodes[0]), int(nodes[1])
g.add_edge(src, dst, weight=float(weight))
return dgl.from_networkx(g, edge_attrs=["weight"])
def add_session(session, graph):
"""
For session like:
[sku1, sku2, sku3]
add 1 weight to each of the following edges:
sku1 -> sku2
sku2 -> sku3
If session length < 2, no nodes/edges will be added
"""
for i in range(len(session) - 1):
edge = str(session[i]) + "," + str(session[i + 1])
try:
graph[edge] += 1
except KeyError:
graph[edge] = 1
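# Worked example (illustrative, not in the original source): add_session([1, 2, 3, 2], graph)
# on an empty dict leaves graph == {"1,2": 1, "2,3": 1, "3,2": 1}, and a second identical
# call bumps each of those weights to 2.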
def parse_actions(datapath, valid_sku_raw_ids):
user_clicks = {}
with open(datapath, "r") as f:
f.readline()
# raw_id -> new_id and new_id -> raw_id
sku_encoder, sku_decoder = {}, []
sku_id = -1
for line in f:
line = line.replace("\n", "")
fields = line.split(",")
action_type = fields[-1]
# actually, all types in the dataset are "1"
if action_type == "1":
user_id = fields[0]
sku_raw_id = fields[1]
if sku_raw_id in valid_sku_raw_ids:
action_time = fields[2]
# encode sku_id
sku_id = encode_id(
sku_encoder, sku_decoder, sku_raw_id, sku_id
)
# add to user clicks
try:
user_clicks[user_id].append((sku_id, action_time))
except KeyError:
user_clicks[user_id] = [(sku_id, action_time)]
return user_clicks, sku_encoder, sku_decoder
def encode_id(encoder, decoder, raw_id, encoded_id):
if raw_id in encoder:
return encoded_id
else:
encoded_id += 1
encoder[raw_id] = encoded_id
decoder.append(raw_id)
return encoded_id
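# Usage sketch (illustrative): starting from encoder={}, decoder=[], current max id -1,
#   encode_id(encoder, decoder, "sku_a", -1)  -> 0  (encoder {"sku_a": 0}, decoder ["sku_a"])
#   encode_id(encoder, decoder, "sku_a", 0)   -> 0  (already seen, nothing added)
#   encode_id(encoder, decoder, "sku_b", 0)   -> 1  (decoder ["sku_a", "sku_b"])
# The caller feeds the returned value back in as the running maximum id.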
def get_valid_sku_set(datapath):
sku_ids = set()
with open(datapath, "r") as f:
for line in f.readlines():
line.replace("\n", "")
sku_raw_id = line.split(",")[0]
sku_ids.add(sku_raw_id)
return sku_ids
def METHOD_NAME(datapath, sku_encoder, sku_decoder):
# sku_id,brand,shop_id,cate,market_time
sku_info_encoder = {"brand": {}, "shop": {}, "cate": {}}
sku_info_decoder = {"brand": [], "shop": [], "cate": []}
sku_info = {}
brand_id, shop_id, cate_id = -1, -1, -1
with open(datapath, "r") as f:
f.readline()
for line in f:
line = line.replace("\n", "")
fields = line.split(",")
sku_raw_id = fields[0]
brand_raw_id = fields[1]
shop_raw_id = fields[2]
cate_raw_id = fields[3]
if sku_raw_id in sku_encoder:
sku_id = sku_encoder[sku_raw_id]
brand_id = encode_id(
sku_info_encoder["brand"],
sku_info_decoder["brand"],
brand_raw_id,
brand_id,
)
shop_id = encode_id(
sku_info_encoder["shop"],
sku_info_decoder["shop"],
shop_raw_id,
shop_id,
)
cate_id = encode_id(
sku_info_encoder["cate"],
sku_info_decoder["cate"],
cate_raw_id,
cate_id,
)
sku_info[sku_id] = [sku_id, brand_id, shop_id, cate_id]
return sku_info_encoder, sku_info_decoder, sku_info
class TestEdge:
def __init__(self, src, dst, label):
self.src = src
self.dst = dst
self.label = label
def split_train_test_graph(graph):
"""
For test true edges, 1/3 of the edges are randomly chosen
and removed as ground truth in the test set,
the remaining graph is taken as the training set.
"""
test_edges = []
neg_sampler = dgl.dataloading.negative_sampler.Uniform(1)
sampled_edge_ids = random.sample(
range(graph.num_edges()), int(graph.num_edges() / 3)
)
for edge_id in sampled_edge_ids:
src, dst = graph.find_edges(edge_id)
test_edges.append(TestEdge(src, dst, 1))
src, dst = neg_sampler(graph, th.tensor([edge_id]))
test_edges.append(TestEdge(src, dst, 0))
graph.remove_edges(sampled_edge_ids)
test_graph = test_edges
return graph, test_graph |
299,702 | register | """
Authentication module that uses Spacewalk's auth system.
Any org_admin or config_admin can get in.
"""
# SPDX-License-Identifier: GPL-2.0-or-later
# SPDX-FileCopyrightText: Copyright 2007-2009, Red Hat, Inc and Others
# SPDX-FileCopyrightText: Michael DeHaan <michael.dehaan AT gmail>
from typing import TYPE_CHECKING
from xmlrpc.client import Error, ServerProxy
from cobbler.cexceptions import CX
from cobbler.utils import log_exc
if TYPE_CHECKING:
from cobbler.api import CobblerAPI
def METHOD_NAME() -> str:
"""
The mandatory Cobbler module registration hook.
"""
return "authn"
def __looks_like_a_token(password: str) -> bool:
"""
What spacewalk sends us could be an internal token or it could be a password. If it's long and lowercase hex, it's
/likely/ a token, so we should try to treat it as a token first and fall back to treating it as a password. All of
this code is there to avoid extra XMLRPC calls, which are slow.
A password gets detected as a token if it is all lowercase and longer than 45 characters.
:param password: The password which is possibly a token.
:return: True if it is possibly a token or False otherwise.
"""
return password.lower() == password and len(password) > 45
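# Illustrative behaviour of the heuristic above (values are hypothetical):
#   "X75bca1..."  (contains uppercase)           -> False, treat as a password
#   "secret"      (lowercase but only 6 chars)   -> False, treat as a password
#   46+ characters of lowercase hex              -> True, try it as a token first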
def __check_auth_token(
xmlrpc_client: "ServerProxy", api_handle: "CobblerAPI", username: str, password: str
):
"""
This checks if the auth token is valid.
:param xmlrpc_client: The xmlrpc client to check access for.
:param api_handle: The api instance to retrieve settings of.
:param username: The username to try.
:param password: The password to try.
:return: In any error case this will return 0. Otherwise, the return value of the API which should be 1.
"""
# If the supplied value is not actually a token, this will raise an exception rather than return an integer.
try:
return xmlrpc_client.auth.checkAuthToken(username, password)
except Error:
logger = api_handle.logger
logger.error("Error while checking authentication token.")
log_exc()
return False
def __check_user_login(
xmlrpc_client: ServerProxy,
api_handle: "CobblerAPI",
user_enabled: bool,
username: str,
password: str,
) -> bool:
"""
This actually performs the login to spacewalk.
:param xmlrpc_client: The xmlrpc client bound to the target spacewalk instance.
:param api_handle: The api instance to retrieve settings of.
:param user_enabled: Whether we allow Spacewalk users to log in or not.
:param username: The username to log in.
:param password: The password to log in.
:return: True if users are allowed to log in and they have the role ``config_admin`` or ``org_admin``.
"""
logger = api_handle.logger
try:
session = xmlrpc_client.auth.login(username, password)
# login success by username, role must also match and user_enabled needs to be true.
roles = xmlrpc_client.user.listRoles(session, username)
if not isinstance(roles, list):
# FIXME: Double check what the actual API is for this!
logger.warning("Uyuni/SUMA returned roles not as a list!")
return False
if user_enabled and ("config_admin" in roles or "org_admin" in roles):
return True
except Error:
logger.error("Error while checking user authentication data.")
log_exc()
return False
def authenticate(api_handle: "CobblerAPI", username: str, password: str) -> bool:
# pylint: disable=line-too-long
"""
Validate a username/password combo. This will pass the username and password back to Spacewalk to see if this
authentication request is valid.
See also: https://github.com/uyuni-project/uyuni/blob/c9b7285117822af96c223cb0b6e0ae96ec7f0837/java/code/src/com/redhat/rhn/frontend/xmlrpc/auth/AuthHandler.java#L107
:param api_handle: The api instance to retrieve settings of.
:param username: The username to authenticate against spacewalk/uyuni/SUSE Manager
:param password: The password to authenticate against spacewalk/uyuni/SUSE Manager
:return: True if it succeeded, False otherwise.
:raises CX: Raised in case ``api_handle`` is missing.
"""
# pylint: enable=line-too-long
if api_handle is None:
raise CX("api_handle required. Please don't call this without it.")
server = api_handle.settings().redhat_management_server
user_enabled = api_handle.settings().redhat_management_permissive
spacewalk_url = f"https://{server}/rpc/api"
with ServerProxy(spacewalk_url, verbose=True) as client:
if username == "taskomatic_user" or __looks_like_a_token(password):
# The tokens are lowercase hex, but a password can also be lowercase hex, so we have to try it first as a
# token and then as a password if we are unsure. We do it this way to be faster but also to avoid any
# "login failed" noise in the logs that we don't need to generate.
# The problem at this point: something like 0xdeadbeef is valid as a token, but if that check fails it is
# also a valid password, so we must fall back to auth system #2.
if __check_auth_token(client, api_handle, username, password) != 1:
return __check_user_login(
client, api_handle, user_enabled, username, password
)
return True
# It's an older version of spacewalk, so just try the username/pass.
# OR: We know for sure it's not a token because it's not lowercase hex.
return __check_user_login(client, api_handle, user_enabled, username, password) |
299,703 | add transition | #=======================================================================
#
# Python Lexical Analyser
#
# Classes for building NFAs and DFAs
#
#=======================================================================
from __future__ import absolute_import
import sys
from .Transitions import TransitionMap
try:
from sys import maxsize as maxint
except ImportError:
from sys import maxint
try:
unichr
except NameError:
unichr = chr
LOWEST_PRIORITY = -maxint
class Machine(object):
"""A collection of Nodes representing an NFA or DFA."""
states = None # [Node]
next_state_number = 1
initial_states = None # {(name, bol): Node}
def __init__(self):
self.states = []
self.initial_states = {}
def __del__(self):
#print "Destroying", self ###
for state in self.states:
state.destroy()
def new_state(self):
"""Add a new state to the machine and return it."""
s = Node()
n = self.next_state_number
self.next_state_number = n + 1
s.number = n
self.states.append(s)
return s
def new_initial_state(self, name):
state = self.new_state()
self.make_initial_state(name, state)
return state
def make_initial_state(self, name, state):
self.initial_states[name] = state
def get_initial_state(self, name):
return self.initial_states[name]
def dump(self, file):
file.write("Plex.Machine:\n")
if self.initial_states is not None:
file.write(" Initial states:\n")
for (name, state) in sorted(self.initial_states.items()):
file.write(" '%s': %d\n" % (name, state.number))
for s in self.states:
s.dump(file)
class Node(object):
"""A state of an NFA or DFA."""
transitions = None # TransitionMap
action = None # Action
action_priority = None # integer
number = 0 # for debug output
epsilon_closure = None # used by nfa_to_dfa()
def __init__(self):
# Preinitialise the list of empty transitions, because
# the nfa-to-dfa algorithm needs it
#self.transitions = {'':[]}
self.transitions = TransitionMap()
self.action_priority = LOWEST_PRIORITY
def destroy(self):
#print "Destroying", self ###
self.transitions = None
self.action = None
self.epsilon_closure = None
def METHOD_NAME(self, event, new_state):
self.transitions.add(event, new_state)
def link_to(self, state):
"""Add an epsilon-move from this state to another state."""
self.METHOD_NAME('', state)
def set_action(self, action, priority):
"""Make this an accepting state with the given action. If
there is already an action, choose the action with highest
priority."""
if priority > self.action_priority:
self.action = action
self.action_priority = priority
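# Example (illustrative): node.set_action(a1, 5) followed by node.set_action(a2, 3)
# leaves the node accepting with action a1, because only a strictly higher priority
# replaces the current action.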
def get_action(self):
return self.action
def get_action_priority(self):
return self.action_priority
def is_accepting(self):
return self.action is not None
def __str__(self):
return "State %d" % self.number
def dump(self, file):
# Header
file.write(" State %d:\n" % self.number)
# Transitions
# self.dump_transitions(file)
self.transitions.dump(file)
# Action
action = self.action
priority = self.action_priority
if action is not None:
file.write(" %s [priority %d]\n" % (action, priority))
def __lt__(self, other):
return self.number < other.number
class FastMachine(object):
"""
FastMachine is a deterministic machine represented in a way that
allows fast scanning.
"""
initial_states = None # {state_name:state}
states = None # [state] where state = {event:state, 'else':state, 'action':Action}
next_number = 1 # for debugging
new_state_template = {
'': None, 'bol': None, 'eol': None, 'eof': None, 'else': None
}
def __init__(self):
self.initial_states = {}
self.states = []
def __del__(self):
for state in self.states:
state.clear()
def new_state(self, action=None):
number = self.next_number
self.next_number = number + 1
result = self.new_state_template.copy()
result['number'] = number
result['action'] = action
self.states.append(result)
return result
def make_initial_state(self, name, state):
self.initial_states[name] = state
def add_transitions(self, state, event, new_state, maxint=maxint):
if type(event) is tuple:
code0, code1 = event
if code0 == -maxint:
state['else'] = new_state
elif code1 != maxint:
while code0 < code1:
state[unichr(code0)] = new_state
code0 += 1
else:
state[event] = new_state
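# Illustrative example: for a range event, add_transitions(state, (97, 100), s2) maps the
# characters 'a', 'b' and 'c' (codes 97..99) to s2 -- the upper bound is exclusive; a lower
# bound of -maxint fills the 'else' slot, and a non-tuple event such as 'bol' is stored
# directly under that key.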
def get_initial_state(self, name):
return self.initial_states[name]
def dump(self, file):
file.write("Plex.FastMachine:\n")
file.write(" Initial states:\n")
for name, state in sorted(self.initial_states.items()):
file.write(" %s: %s\n" % (repr(name), state['number']))
for state in self.states:
self.dump_state(state, file)
def dump_state(self, state, file):
# Header
file.write(" State %d:\n" % state['number'])
# Transitions
self.dump_transitions(state, file)
# Action
action = state['action']
if action is not None:
file.write(" %s\n" % action)
def dump_transitions(self, state, file):
chars_leading_to_state = {}
special_to_state = {}
for (c, s) in state.items():
if len(c) == 1:
chars = chars_leading_to_state.get(id(s), None)
if chars is None:
chars = []
chars_leading_to_state[id(s)] = chars
chars.append(c)
elif len(c) <= 4:
special_to_state[c] = s
ranges_to_state = {}
for state in self.states:
char_list = chars_leading_to_state.get(id(state), None)
if char_list:
ranges = self.chars_to_ranges(char_list)
ranges_to_state[ranges] = state
ranges_list = ranges_to_state.keys()
ranges_list.sort()
for ranges in ranges_list:
key = self.ranges_to_string(ranges)
state = ranges_to_state[ranges]
file.write(" %s --> State %d\n" % (key, state['number']))
for key in ('bol', 'eol', 'eof', 'else'):
state = special_to_state.get(key, None)
if state:
file.write(" %s --> State %d\n" % (key, state['number']))
def chars_to_ranges(self, char_list):
char_list.sort()
i = 0
n = len(char_list)
result = []
while i < n:
c1 = ord(char_list[i])
c2 = c1
i += 1
while i < n and ord(char_list[i]) == c2 + 1:
i += 1
c2 += 1
result.append((chr(c1), chr(c2)))
return tuple(result)
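# Worked example (illustrative): chars_to_ranges(['b', 'a', 'c', 'x']) sorts the list and
# collapses consecutive codes, returning (('a', 'c'), ('x', 'x')), which ranges_to_string()
# below renders as "'a'..'c','x'".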
def ranges_to_string(self, range_list):
return ','.join(map(self.range_to_string, range_list))
def range_to_string(self, range_tuple):
(c1, c2) = range_tuple
if c1 == c2:
return repr(c1)
else:
return "%s..%s" % (repr(c1), repr(c2)) |
299,704 | af | import numpy as np
import pytest
import pyccl as ccl
from .test_cclobject import check_eq_repr_hash
def test_Tk3D_eq_repr_hash():
# Test eq, repr, hash for Tk3D.
cosmo = ccl.CosmologyVanillaLCDM(transfer_function="bbks")
cosmo.compute_linear_power()
PK1 = cosmo.get_linear_power()
# 1. Using a factorizable Tk3D object.
a_arr, lk_arr, pk_arr = PK1.get_spline_arrays()
TK1 = ccl.Tk3D(a_arr=a_arr, lk_arr=lk_arr,
pk1_arr=pk_arr, pk2_arr=pk_arr, is_logt=False)
TK2 = ccl.Tk3D(a_arr=a_arr, lk_arr=lk_arr,
pk1_arr=pk_arr, pk2_arr=pk_arr, is_logt=False)
assert check_eq_repr_hash(TK1, TK2)
TK3 = ccl.Tk3D(a_arr=a_arr, lk_arr=lk_arr,
pk1_arr=2*pk_arr, pk2_arr=2*pk_arr, is_logt=False)
assert check_eq_repr_hash(TK1, TK3, equal=False)
# 2. Using a non-factorizable Tk3D object.
a_arr_2 = np.arange(0.5, 0.9, 0.1)
lk_arr_2 = np.linspace(-2, 1, 8)
TK4 = ccl.Tk3D(
a_arr=a_arr_2, lk_arr=lk_arr_2,
tkk_arr=np.ones((a_arr_2.size, lk_arr_2.size, lk_arr_2.size)))
TK5 = ccl.Tk3D(
a_arr=a_arr_2, lk_arr=lk_arr_2,
tkk_arr=np.ones((a_arr_2.size, lk_arr_2.size, lk_arr_2.size)))
assert check_eq_repr_hash(TK4, TK5)
TK6 = ccl.Tk3D(
a_arr=a_arr_2, lk_arr=lk_arr_2,
tkk_arr=2*np.ones((a_arr_2.size, lk_arr_2.size, lk_arr_2.size)))
assert check_eq_repr_hash(TK4, TK6, equal=False)
# edge-case: comparing different types
assert check_eq_repr_hash(TK1, 1, equal=False)
# edge-case: empty objects
tka1, tka2 = [ccl.Tk3D.__new__(ccl.Tk3D) for _ in range(2)]
assert check_eq_repr_hash(tka1, tka2)
# edge-case: only one Tk is factorizable (exits early)
assert check_eq_repr_hash(TK1, TK4, equal=False)
# edge-case: different extrapolation orders
a_arr, lk_arr, pk_arr = PK1.get_spline_arrays()
t1 = ccl.Tk3D(a_arr=a_arr, lk_arr=lk_arr, pk1_arr=pk_arr, pk2_arr=pk_arr,
is_logt=False, extrap_order_lok=0)
t2 = ccl.Tk3D(a_arr=a_arr, lk_arr=lk_arr, pk1_arr=pk_arr, pk2_arr=pk_arr,
is_logt=False, extrap_order_lok=1)
assert check_eq_repr_hash(t1, t2, equal=False)
def kf(k):
return (k/0.1)**(-1)
def METHOD_NAME(a):
return np.exp(a-1)
def fka1f(k, a):
return kf(k)*METHOD_NAME(a)**2
def fka2f(k, a):
return (kf(k))**2.1*METHOD_NAME(a)**2
def tkkaf(k1, k2, a):
return fka1f(k1, a)*fka2f(k2, a)
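# Illustrative note: these helpers build a factorizable test trispectrum
# T(k1, k2, a) = fka1f(k1, a) * fka2f(k2, a), where each factor is kf(k) (resp. kf(k)**2.1)
# times the squared scale-factor function above; e.g. tkkaf(0.1, 0.1, 1.0) == 1.0 because
# kf(0.1) == 1 and exp(1 - 1) == 1.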
def get_arrays(islog=True):
a_arr = np.linspace(0.05, 1., 10)
k_arr = np.geomspace(1E-4, 1E2, 10)
lk_arr = np.log(k_arr)
fka1_arr = np.array([fka1f(k_arr, a) for a in a_arr])
fka2_arr = np.array([fka2f(k_arr, a) for a in a_arr])
tkka_arr = np.array([tkkaf(k_arr[None, :],
k_arr[:, None], a)
for a in a_arr])
if islog:
fka1_arr = np.log(fka1_arr)
fka2_arr = np.log(fka2_arr)
tkka_arr = np.log(tkka_arr)
return (a_arr, lk_arr, fka1_arr,
fka2_arr, tkka_arr)
def test_tk3d_errors():
"""
Test initialization of Pk2D objects
"""
(a_arr, lk_arr, fka1_arr, fka2_arr, tkka_arr) = get_arrays()
# Decreasing a
with pytest.raises(ValueError):
ccl.Tk3D(a_arr=a_arr[::-1], lk_arr=lk_arr,
tkk_arr=tkka_arr)
# Decreasing lk
with pytest.raises(ValueError):
ccl.Tk3D(a_arr=a_arr, lk_arr=lk_arr[::-1],
tkk_arr=tkka_arr)
# Non monotonic
a2 = a_arr.copy()
a2[1] = a2[0]
with pytest.raises(ValueError):
ccl.Tk3D(a_arr=a2, lk_arr=lk_arr, tkk_arr=tkka_arr)
# If no input
with pytest.raises(TypeError):
ccl.Tk3D()
# No input tkk or fkas
with pytest.raises(ValueError):
ccl.Tk3D(a_arr=a_arr, lk_arr=lk_arr)
# Missing one fka factor
with pytest.raises(ValueError):
ccl.Tk3D(a_arr=a_arr, lk_arr=lk_arr, pk2_arr=fka2_arr)
# fka has wrong shape
with pytest.raises(ValueError):
ccl.Tk3D(a_arr=a_arr, lk_arr=lk_arr, pk1_arr=tkka_arr)
# tkka has wrong shape
with pytest.raises(ValueError):
ccl.Tk3D(a_arr=a_arr, lk_arr=lk_arr, tkk_arr=fka1_arr)
# Wrong extrapolation orders
with pytest.raises(ValueError):
ccl.Tk3D(a_arr=a_arr, lk_arr=lk_arr,
tkk_arr=tkka_arr, extrap_order_hik=-1)
with pytest.raises(ValueError):
ccl.Tk3D(a_arr=a_arr, lk_arr=lk_arr,
tkk_arr=tkka_arr, extrap_order_lok=2)
def test_tk3d_smoke():
"""Make sure it works once."""
(a_arr, lk_arr, fka1_arr, fka2_arr, tkka_arr) = get_arrays()
tsp1 = ccl.Tk3D(a_arr=a_arr, lk_arr=lk_arr, pk1_arr=fka1_arr,
pk2_arr=fka2_arr)
tsp2 = ccl.Tk3D(a_arr=a_arr, lk_arr=lk_arr, tkk_arr=tkka_arr)
assert not np.isnan(tsp1(1E-2, 0.5))
assert not np.isnan(tsp2(1E-2, 0.5))
def test_tk3d_delete():
"""Check that ccl.Tk3D.__del__ works."""
(a_arr, lk_arr, fka1_arr, fka2_arr, tkka_arr) = get_arrays()
tsp = ccl.Tk3D(a_arr=a_arr, lk_arr=lk_arr, pk1_arr=fka1_arr,
pk2_arr=fka2_arr)
# This should not cause an ignored exception
del tsp
@pytest.mark.parametrize('is_product', [True, False])
def test_tk3d_eval(is_product):
(a_arr, lk_arr, fka1_arr, fka2_arr, tkka_arr) = get_arrays()
if is_product:
tsp = ccl.Tk3D(a_arr=a_arr, lk_arr=lk_arr, pk1_arr=fka1_arr,
pk2_arr=fka2_arr)
else:
tsp = ccl.Tk3D(a_arr=a_arr, lk_arr=lk_arr, tkk_arr=tkka_arr)
# Test at single point
ktest = 0.7
atest = 0.5
ptrue = tkkaf(ktest, ktest, atest)
phere = tsp(ktest, atest)
assert np.allclose(phere, ptrue, atol=0, rtol=1e-6)
ktest = 5E-5
atest = 0.5
ptrue = tkkaf(ktest, ktest, atest)
phere = tsp(ktest, atest)
assert np.allclose(phere, ptrue, atol=0, rtol=1e-6)
# Test at array of points
ktest = np.logspace(-3, 1, 10)
ptrue = tkkaf(ktest[None, :], ktest[:, None], atest)
phere = tsp(ktest, atest)
assert np.allclose(phere, ptrue, atol=0, rtol=1e-6)
def test_tk3d_call():
# Test `__call__` and `__bool__`
(a_arr, lk_arr, fka1_arr, fka2_arr, tkka_arr) = get_arrays()
tsp = ccl.Tk3D(a_arr=a_arr, lk_arr=lk_arr, tkk_arr=tkka_arr)
assert bool(tsp) is tsp.has_tsp
assert np.allclose(np.array([tsp(np.exp(lk_arr), a) for a in a_arr]),
tsp(np.exp(lk_arr), a_arr), rtol=1e-15)
@pytest.mark.parametrize('is_product', [True, False])
def test_tk3d_spline_arrays(is_product):
(a_arr, lk_arr, fka1_arr, fka2_arr, tkka_arr) = get_arrays()
if is_product:
tsp = ccl.Tk3D(a_arr=a_arr, lk_arr=lk_arr,
pk1_arr=fka1_arr, pk2_arr=fka2_arr)
else:
tsp = ccl.Tk3D(a_arr=a_arr, lk_arr=lk_arr, tkk_arr=tkka_arr)
a_get, lk_get1, lk_get2, out = tsp.get_spline_arrays()
assert np.allclose(a_get, a_arr, rtol=1e-15)
assert np.allclose(lk_get1, lk_arr, rtol=1e-15)
assert np.allclose(lk_get2, lk_arr, rtol=1e-15)
if is_product:
assert np.allclose(np.log(out[0]), fka1_arr, rtol=1e-15)
assert np.allclose(np.log(out[1]), fka2_arr, rtol=1e-15)
else:
assert np.allclose(np.log(out[0]), tkka_arr, rtol=1e-15)
def test_tk3d_spline_arrays_raises():
(a_arr, lk_arr, fka1_arr, fka2_arr, tkka_arr) = get_arrays()
tsp = ccl.Tk3D(a_arr=a_arr, lk_arr=lk_arr, tkk_arr=tkka_arr)
ccl.lib.f3d_t_free(tsp.tsp)
delattr(tsp, "tsp")
with pytest.raises(ValueError):
tsp.get_spline_arrays() |
299,705 | clean nxos data | import re
import numpy as np
from suzieq.poller.worker.services.service import Service
from suzieq.shared.utils import (
convert_macaddr_format_to_colon, expand_ios_ifname)
class ArpndService(Service):
"""arpnd service. Different class because minor munging of output"""
def _clean_linux_data(self, processed_data, _):
for entry in processed_data:
entry["remote"] = entry["remote"] == "offload"
entry["state"] = entry["state"].lower()
if entry["state"] == "stale" or entry["state"] == "delay":
entry["state"] = "reachable"
elif entry['state'] in ['extern_learn', 'offload']:
entry['state'] = 'reachable'
entry['remote'] = True
if not entry.get('macaddr', None):
entry['macaddr'] = '00:00:00:00:00:00'
entry['state'] = 'failed'
return processed_data
def _clean_cumulus_data(self, processed_data, raw_data):
return self._clean_linux_data(processed_data, raw_data)
def _clean_sonic_data(self, processed_data, raw_data):
return self._clean_linux_data(processed_data, raw_data)
def _clean_eos_data(self, processed_data, _):
for entry in processed_data:
entry['macaddr'] = convert_macaddr_format_to_colon(
entry.get('macaddr', '0000.0000.0000'))
# EOS has entries with OIF of type: "Vlan4094, Port-Channel1"
# We need only the first part, we pick up the second from the
# MAC table
if ',' in entry['oif']:
ports = entry['oif'].split(',')
entry['oif'] = ports[0].strip()
if ports[1].strip() == 'Vxlan1':
entry['remote'] = True
return processed_data
def _clean_junos_data(self, processed_data, _):
for entry in processed_data:
if '[vtep.' in entry['oif']:
entry['remote'] = True
if entry['oif']:
entry['oif'] = re.sub(r' \[.*\]', '', entry['oif'])
entry['state'] = 'reachable'
if not entry.get('macaddr', None):
entry['macaddr'] = '00:00:00:00:00:00'
return processed_data
def METHOD_NAME(self, processed_data, _):
drop_indices = []
for i, entry in enumerate(processed_data):
if not entry['ipAddress']:
drop_indices.append(i)
continue
if 'state' not in entry:
# textfsm version handling
entry['state'] = 'reachable'
macaddr = entry.get('macaddr', '')
if macaddr:
macaddr = macaddr.lower()
if macaddr in ['', 'incomplete']:
entry['state'] = "failed"
entry['macaddr'] = '00:00:00:00:00:00'
else:
entry['macaddr'] = convert_macaddr_format_to_colon(
macaddr or '0000.0000.0000')
processed_data = np.delete(processed_data,
drop_indices).tolist()
return processed_data
def _clean_common_ios_data(self, processed_data, _):
for entry in processed_data:
macaddr = entry.get('macaddr', '')
if macaddr:
macaddr = macaddr.lower()
if macaddr in ['', 'incomplete']:
entry['state'] = "failed"
entry['macaddr'] = '00:00:00:00:00:00'
else:
entry['macaddr'] = convert_macaddr_format_to_colon(
entry.get('macaddr', '0000.0000.0000'))
entry['state'] = "reachable"
if ':' in entry['ipAddress']:
# We need to fix up the interface name for IPv6 ND entries
entry['oif'] = expand_ios_ifname(entry.get('oif', ''))
return processed_data
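# Illustrative note (assumption about the shared helper, not verified here):
# convert_macaddr_format_to_colon() is expected to turn a dotted MAC such as
# "0050.56bb.0001" into "00:50:56:bb:00:01"; missing or "incomplete" entries are
# normalised to "00:00:00:00:00:00" with state "failed" instead.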
def _clean_iosxr_data(self, processed_data, raw_data):
return self._clean_common_ios_data(processed_data, raw_data)
def _clean_iosxe_data(self, processed_data, raw_data):
return self._clean_common_ios_data(processed_data, raw_data)
def _clean_ios_data(self, processed_data, raw_data):
return self._clean_common_ios_data(processed_data, raw_data)
def _clean_panos_data(self, processed_data, _):
for entry in processed_data:
# status: s = static, c = complete, e = expiring, i = incomplete
# ARP entries are shown with status as merely a letter while
# ND entries are shown with the status as a self-respecting word.
# sigh
state = entry.get("state", "").lower()
if state in ["s", "static"]:
entry["state"] = "permanent"
elif state in ["c", "e", "stale", "reachable"]:
entry["state"] = "reachable"
else:
entry["state"] = "failed"
return processed_data |
299,706 | id | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetPrivateEndpointConnectionResult',
'AwaitableGetPrivateEndpointConnectionResult',
'get_private_endpoint_connection',
'get_private_endpoint_connection_output',
]
@pulumi.output_type
class GetPrivateEndpointConnectionResult:
"""
The Private Endpoint Connection resource.
"""
def __init__(__self__, METHOD_NAME=None, name=None, private_endpoint=None, private_link_service_connection_state=None, provisioning_state=None, system_data=None, type=None):
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", METHOD_NAME)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if private_endpoint and not isinstance(private_endpoint, dict):
raise TypeError("Expected argument 'private_endpoint' to be a dict")
pulumi.set(__self__, "private_endpoint", private_endpoint)
if private_link_service_connection_state and not isinstance(private_link_service_connection_state, dict):
raise TypeError("Expected argument 'private_link_service_connection_state' to be a dict")
pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> Optional['outputs.PrivateEndpointResponse']:
"""
The resource of private end point.
"""
return pulumi.get(self, "private_endpoint")
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> 'outputs.PrivateLinkServiceConnectionStateResponse':
"""
A collection of information about the state of the connection between service consumer and provider.
"""
return pulumi.get(self, "private_link_service_connection_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the private endpoint connection resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Metadata pertaining to creation and last modification of the resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetPrivateEndpointConnectionResult(GetPrivateEndpointConnectionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPrivateEndpointConnectionResult(
METHOD_NAME=self.METHOD_NAME,
name=self.name,
private_endpoint=self.private_endpoint,
private_link_service_connection_state=self.private_link_service_connection_state,
provisioning_state=self.provisioning_state,
system_data=self.system_data,
type=self.type)
def get_private_endpoint_connection(private_endpoint_connection_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
resource_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateEndpointConnectionResult:
"""
Gets the specified private endpoint connection associated with the service.
:param str private_endpoint_connection_name: The name of the private endpoint connection associated with the Azure resource
:param str resource_group_name: The name of the resource group that contains the service instance.
:param str resource_name: The name of the service instance.
"""
__args__ = dict()
__args__['privateEndpointConnectionName'] = private_endpoint_connection_name
__args__['resourceGroupName'] = resource_group_name
__args__['resourceName'] = resource_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:healthcareapis/v20230228:getPrivateEndpointConnection', __args__, opts=opts, typ=GetPrivateEndpointConnectionResult).value
return AwaitableGetPrivateEndpointConnectionResult(
METHOD_NAME=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
private_endpoint=pulumi.get(__ret__, 'private_endpoint'),
private_link_service_connection_state=pulumi.get(__ret__, 'private_link_service_connection_state'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_private_endpoint_connection)
def get_private_endpoint_connection_output(private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPrivateEndpointConnectionResult]:
"""
Gets the specified private endpoint connection associated with the service.
:param str private_endpoint_connection_name: The name of the private endpoint connection associated with the Azure resource
:param str resource_group_name: The name of the resource group that contains the service instance.
:param str resource_name: The name of the service instance.
"""
... |
299,707 | write iis | """
This module contains functions for computing an irreducible infeasible set
for a Pyomo MILP or LP using a specified commercial solver, one of CPLEX,
Gurobi, or Xpress.
"""
import abc
import logging
from pyomo.environ import SolverFactory
logger = logging.getLogger("pyomo.contrib.iis")
logger.setLevel(logging.INFO)
def METHOD_NAME(pyomo_model, iis_file_name, solver=None, logger=logger):
"""
Write an irreducible infeasible set for a Pyomo MILP or LP
using the specified commercial solver.
Arguments
---------
pyomo_model:
A Pyomo Block or ConcreteModel
iis_file_name:str
A file name to write the IIS to, e.g., infeasible_model.ilp
solver:str
Specify the solver to use, one of "cplex", "gurobi", or "xpress".
If None, the tool will use the first solver available.
logger:logging.Logger
A logger for messages. Uses pyomo.contrib.iis logger by default.
Returns
-------
iis_file_name:str
The file containing the IIS.
"""
available_solvers = [
s
for s, sp in zip(_supported_solvers, _supported_solvers_persistent)
if SolverFactory(sp).available(exception_flag=False)
]
if solver is None:
if len(available_solvers) == 0:
raise RuntimeError(
f"Could not find a solver to use, supported solvers are {_supported_solvers}"
)
solver = available_solvers[0]
logger.info(f"Using solver {solver}")
else:
# validate
solver = solver.lower()
solver = _remove_suffix(solver, "_persistent")
if solver not in available_solvers:
raise RuntimeError(
f"The Pyomo persistent interface to {solver} could not be found."
)
solver_name = solver
solver = SolverFactory(solver + "_persistent")
solver.set_instance(pyomo_model, symbolic_solver_labels=True)
iis = IISFactory(solver)
iis.compute()
iis_file_name = iis.write(iis_file_name)
logger.info(f"IIS written to {iis_file_name}")
return iis_file_name
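# Usage sketch (illustrative; assumes the function above is the public write_iis entry point
# and that one of the supported persistent solvers is installed):
#   from pyomo.contrib.iis import write_iis
#   ilp_path = write_iis(model, "infeasible_model.ilp", solver="gurobi")
# Note the returned path may differ from the requested name (e.g. Gurobi enforces a .ilp suffix).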
def _remove_suffix(string, suffix):
if string.endswith(suffix):
return string[: -len(suffix)]
else:
return string
class _IISBase(abc.ABC):
def __init__(self, solver):
self._solver = solver
@abc.abstractmethod
def compute(self):
"""computes the IIS/Conflict"""
pass
@abc.abstractmethod
def write(self, file_name):
"""writes the IIS in LP format
return the file name written
"""
pass
class CplexConflict(_IISBase):
def compute(self):
self._solver._solver_model.conflict.refine()
def write(self, file_name):
self._solver._solver_model.conflict.write(file_name)
return file_name
class GurobiIIS(_IISBase):
def compute(self):
self._solver._solver_model.computeIIS()
def write(self, file_name):
# gurobi relies on the suffix to
# determine the file type
file_name = _remove_suffix(file_name, ".ilp")
file_name += ".ilp"
self._solver._solver_model.write(file_name)
return file_name
class XpressIIS(_IISBase):
def compute(self):
self._solver._solver_model.iisfirst(1)
def write(self, file_name):
self._solver._solver_model.iiswrite(0, file_name, 0, "l")
if self._solver._version[0] < 38:
return file_name
else:
return _remove_suffix(file_name, ".lp") + ".lp"
_solver_map = {
"cplex_persistent": CplexConflict,
"gurobi_persistent": GurobiIIS,
"xpress_persistent": XpressIIS,
}
def IISFactory(solver):
if solver.name not in _solver_map:
raise RuntimeError(f"Unrecognized solver {solver.name}")
return _solver_map[solver.name](solver)
_supported_solvers_persistent = list(_solver_map.keys())
_supported_solvers = [_remove_suffix(s, "_persistent") for s in _solver_map] |
299,708 | efi mode | # Copyright (c) 2015 SUSE Linux GmbH. All rights reserved.
#
# This file is part of kiwi.
#
# kiwi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# kiwi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with kiwi. If not, see <http://www.gnu.org/licenses/>
#
import re
# project
from .defaults import Defaults
from .exceptions import (
KiwiNotImplementedError
)
class FirmWare:
"""
**Implements firmware specific methods**
According to the selected firmware, some parameters of a disk
image change. This class provides methods to obtain firmware
dependent information
* :param object xml_state: instance of :class:`XMLState`
"""
def __init__(self, xml_state):
self.arch = Defaults.get_platform_name()
self.zipl_target_type = \
xml_state.get_build_type_bootloader_targettype()
self.firmware = xml_state.build_type.get_firmware()
self.efipart_mbytes = xml_state.build_type.get_efipartsize()
self.efi_partition_table = xml_state.build_type.get_efiparttable()
if not self.firmware:
self.firmware = Defaults.get_default_firmware(self.arch)
if self.firmware and self.firmware != 'custom':
firmware_types = Defaults.get_firmware_types()
if self.firmware not in firmware_types[self.arch]:
raise KiwiNotImplementedError(
'support for firmware %s for arch %s not implemented' %
(self.firmware, self.arch)
)
def get_partition_table_type(self):
"""
Provides partition table type according to architecture and firmware
:return: partition table name
:rtype: str
"""
if 's390' in self.arch:
if self.zipl_target_type and 'CDL' in self.zipl_target_type:
return 'dasd'
else:
return 'msdos'
elif 'ppc64' in self.arch:
return 'gpt'
elif self.METHOD_NAME():
default_efi_table = Defaults.get_default_efi_partition_table_type()
return self.efi_partition_table or default_efi_table
else:
return 'msdos'
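# Example mapping (illustrative summary of the branches above): s390 with a CDL zipl
# target -> 'dasd', other s390 -> 'msdos', ppc64 -> 'gpt', any EFI-capable firmware ->
# the configured efiparttable (or the project default), everything else -> 'msdos'.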
def legacy_bios_mode(self):
"""
Check if the legacy boot from BIOS systems should be activated
:return: True or False
:rtype: bool
"""
if self.get_partition_table_type() == 'gpt':
if self.arch == 'x86_64' or re.match('i.86', self.arch):
return True
else:
return False
else:
return False
def METHOD_NAME(self):
"""
Check if EFI mode is requested
:return: The requested EFI mode or None if no EFI mode requested
:rtype: str
"""
if self.firmware in Defaults.get_efi_capable_firmware_names():
return self.firmware
def ec2_mode(self):
"""
Check if EC2 mode is requested
:return: True or False
:rtype: bool
"""
if self.firmware in Defaults.get_ec2_capable_firmware_names():
return self.firmware
def bios_mode(self):
"""
Check if BIOS mode is requested
:return: True or False
:rtype: bool
"""
if self.firmware == 'bios':
return True
else:
return False
def ofw_mode(self):
"""
Check if OFW mode is requested
:return: True or False
:rtype: bool
"""
if self.firmware == 'ofw':
return True
else:
return False
def opal_mode(self):
"""
Check if Opal mode is requested
:return: True or False
:rtype: bool
"""
if self.firmware == 'opal':
return True
else:
return False
def get_legacy_bios_partition_size(self):
"""
Size of legacy bios_grub partition if legacy BIOS mode is
required. Returns 0 if no such partition is needed
:return: mbsize value
:rtype: int
"""
if self.legacy_bios_mode():
return Defaults.get_default_legacy_bios_mbytes()
else:
return 0
def get_efi_partition_size(self):
"""
Size of EFI partition.
Returns 0 if no such partition is needed
:return: mbsize value
:rtype: int
"""
if self.METHOD_NAME():
if self.efipart_mbytes:
return self.efipart_mbytes
else:
return Defaults.get_default_efi_boot_mbytes()
else:
return 0
def get_prep_partition_size(self):
"""
Size of Prep partition if OFW mode is requested.
Returns 0 if no such partition is needed
:return: mbsize value
:rtype: int
"""
if self.ofw_mode():
return Defaults.get_default_prep_mbytes()
else:
return 0 |
299,709 | test infer | # Copyright 2020-2023 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import gc
import cv2
import shutil
import os
import numpy as np
from opendr.perception.object_detection_2d import YOLOv3DetectorLearner
from opendr.perception.object_detection_2d import WiderPersonDataset
device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu'
def rmfile(path):
try:
os.remove(path)
except OSError as e:
print("Error: %s - %s." % (e.filename, e.strerror))
def rmdir(_dir):
try:
shutil.rmtree(_dir)
except OSError as e:
print("Error: %s - %s." % (e.filename, e.strerror))
class TestYOLOv3DetectorLearner(unittest.TestCase):
@classmethod
def setUpClass(cls):
print("\n\n**********************************\nTEST YOLOv3Detector Learner\n"
"**********************************")
cls.temp_dir = os.path.join(".", "tests", "sources", "tools", "perception", "object_detection_2d",
"yolov3", "yolov3_temp")
cls.detector = YOLOv3DetectorLearner(device=device, temp_path=cls.temp_dir, batch_size=1, epochs=1,
checkpoint_after_iter=0, lr=1e-4, num_workers=0, img_size=320)
# Download all required files for testing
cls.detector.download(mode="pretrained")
cls.detector.download(mode="images")
cls.detector.download(mode="test_data")
@classmethod
def tearDownClass(cls):
print('Removing temporary directories for YOLOv3...')
# Clean up downloaded files
rmfile(os.path.join(cls.temp_dir, "cat.jpg"))
rmdir(os.path.join(cls.temp_dir, "yolo_default"))
rmdir(os.path.join(cls.temp_dir, "test_data"))
rmdir(os.path.join(cls.temp_dir))
del cls.detector
gc.collect()
print('Finished cleaning for YOLOv3...')
def test_fit(self):
print('Starting training test for YOLov3...')
training_dataset = WiderPersonDataset(root=os.path.join(self.temp_dir, "test_data"), splits=['train'])
m = list(self.detector._model.collect_params().values())[1].data().asnumpy().copy()
self.detector.fit(dataset=training_dataset, verbose=True)
n = list(self.detector._model.collect_params().values())[1].data().asnumpy()
self.assertFalse(np.array_equal(m, n),
msg="Model parameters did not change after running fit.")
del training_dataset, m, n
gc.collect()
print('Finished training test for YOLOv3...')
def test_eval(self):
print('Starting evaluation test for YOLOv3...')
eval_dataset = WiderPersonDataset(root=os.path.join(self.temp_dir, "test_data"), splits=['train'])
self.detector.load(os.path.join(self.temp_dir, "yolo_default"))
results_dict = self.detector.eval(eval_dataset)
self.assertIsNotNone(results_dict['map'],
msg="Eval results dictionary not returned.")
del eval_dataset, results_dict
gc.collect()
print('Finished evaluation test for YOLOv3...')
def METHOD_NAME(self):
print('Starting inference test for YOLOv3...')
self.detector.load(os.path.join(self.temp_dir, "yolo_default"))
img = cv2.imread(os.path.join(self.temp_dir, "cat.jpg"))
self.assertIsNotNone(self.detector.infer(img),
msg="Returned empty BoundingBoxList.")
del img
gc.collect()
print('Finished inference test for YOLOv3...')
def test_save_load(self):
print('Starting save/load test for YOLOv3...')
self.detector.save(os.path.join(self.temp_dir, "test_model"))
self.detector._model = None
self.detector.load(os.path.join(self.temp_dir, "test_model"))
self.assertIsNotNone(self.detector._model, "model is None after loading model.")
# Cleanup
rmdir(os.path.join(self.temp_dir, "test_model"))
print('Finished save/load test for YOLOv3...')
if __name__ == "__main__":
unittest.main() |
299,710 | shutdown event | import io
import os
import traceback
from distutils.util import strtobool
import pydantic
import yaml
from aiohttp import ClientResponseError
from fastapi import FastAPI, Request, Response
from fastapi.exceptions import HTTPException
from fastapi.responses import JSONResponse
from app.event_handling.websocket_manager import WebsocketManager
from app.exceptions.cloud_api_error import CloudApiException
from app.routes import (
connections,
definitions,
issuer,
jsonld,
messaging,
oob,
sse,
trust_registry,
verifier,
wallet,
webhooks,
)
from app.routes.admin import tenants
from shared.log_config import get_logger
OPENAPI_NAME = os.getenv("OPENAPI_NAME", "OpenAPI")
PROJECT_VERSION = os.getenv("PROJECT_VERSION", "0.9.0")
logger = get_logger(__name__)
prod = strtobool(os.environ.get("prod", "True"))
debug = not prod
def create_app() -> FastAPI:
routes = [
tenants,
connections,
definitions,
issuer,
jsonld,
messaging,
oob,
trust_registry,
verifier,
wallet,
webhooks,
sse,
]
application = FastAPI(
debug=debug,
title=OPENAPI_NAME,
description="""
Welcome to the Aries CloudAPI Python project.
In addition to the traditional HTTP-based endpoints described below, we also offer WebSocket endpoints for real-time interfacing with webhook events.
WebSocket endpoints are authenticated. This means that only users with valid authentication tokens can establish a WebSocket connection, and they can only subscribe to their own wallet's events. However, Admin users have the ability to subscribe by topic, or to any wallet.
Our WebSocket endpoints are as follows:
1. `/ws/topic/{topic}`: (Admin only) This endpoint allows admins to receive all webhook events on a specific topic (e.g. `connections`, `credentials`, `proofs`, `endorsements`).
2. `/ws/{wallet_id}`: This endpoint allows authenticated users to receive webhook events associated with a specific wallet ID.
3. `/ws/{wallet_id}/{topic}`: Similar to above, but with topic-specific subscription.
For authentication, the WebSocket headers should include `x-api-key`: `<your key>`.
Please refer to our API documentation for more details about our authentication mechanism, as well as for information about the available topics.
""",
version=PROJECT_VERSION,
)
for route in routes:
# Routes will appear in the openapi docs with the same order as defined in `routes`
application.include_router(route.router)
return application
app = create_app()
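# Illustrative client sketch (assumption, not part of this module): subscribing to a wallet's
# webhook events over the WebSocket endpoints described in the API description above, e.g.
# with the third-party `websockets` package:
#   async with websockets.connect(f"{base_url}/ws/{wallet_id}",
#                                 extra_headers={"x-api-key": api_key}) as ws:
#       event = await ws.recv()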
@app.on_event("shutdown")
async def METHOD_NAME():
logger.info("Calling WebsocketManager shutdown")
await WebsocketManager.disconnect_all()
# add endpoints
# additional yaml version of openapi.json
@app.get("/openapi.yaml", include_in_schema=False)
def read_openapi_yaml() -> Response:
logger.info("GET request received: OpenAPI yaml endpoint")
openapi_json = app.openapi()
yaml_s = io.StringIO()
yaml.dump(openapi_json, yaml_s, allow_unicode=True, sort_keys=False)
logger.info("Returning OpenAPI yaml text.")
return Response(content=yaml_s.getvalue(), media_type="text/yaml")
@app.exception_handler(Exception)
async def client_response_error_exception_handler(
request: Request, exception: Exception
):
stacktrace = {"stack": traceback.format_exc()}
if isinstance(exception, ClientResponseError):
return JSONResponse(
{"detail": exception.message, **(stacktrace if debug else {})},
exception.status or 500,
)
if isinstance(exception, CloudApiException):
return JSONResponse(
{"detail": exception.detail, **(stacktrace if debug else {})},
exception.status_code,
)
if isinstance(exception, HTTPException):
return JSONResponse(
{**exception.detail, **(stacktrace if debug else {})},
exception.status_code,
exception.headers,
)
if isinstance(exception, pydantic.error_wrappers.ValidationError):
return JSONResponse({"detail": exception.errors()}, status_code=422)
else:
return JSONResponse(
{"detail": "Internal server error", "exception": str(exception)}, 500
) |
299,711 | populate temp filesystem | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
import os
import subprocess
import tempfile
import unittest
from contextlib import contextmanager
from unittest import mock
from antlir.buck.buck_label.buck_label_py import Label
from antlir.compiler.items.common import assert_running_inside_ba, LayerOpts
from antlir.compiler.requires_provides import (
ProvidesDirectory,
ProvidesFile,
ProvidesSymlink,
)
from antlir.fs_utils import Path
from antlir.nspawn_in_subvol.args import new_nspawn_opts, PopenArgs
from antlir.nspawn_in_subvol.nspawn import run_nspawn
from antlir.subvol_utils import Subvol
from antlir.tests.layer_resource import layer_resource_subvol
from antlir.tests.subvol_helpers import pop_path, render_subvol
# Re-export for legacy reasons
pop_path = pop_path
render_subvol = render_subvol
DEFAULT_STAT_OPTS = ["--user=root", "--group=root", "--mode=0755"]
DUMMY_LAYER_OPTS = LayerOpts(
layer_target=Label("fbcode//path/to/fake:target"), # Only used by error messages
build_appliance=None,
# For a handful of tests, this must be a boolean value so the layer
# emits it it into /.meta, but the value is not important.
artifacts_may_require_repo=True,
# pyre-fixme[6]: Expected `Mapping[str, str]` for 4th param but got `None`.
target_to_path=None,
# pyre-fixme[6]: Expected `Path` for 5th param but got `None`.
subvolumes_dir=None,
rpm_installer=None,
version_set_override=None,
rpm_repo_snapshot=None,
# pyre-fixme[6]: Expected `frozenset[str]` for 9th param but got
# `List[Variable[_T]]`.
allowed_host_mount_targets=[],
flavor="antlir_test",
)
# This has to be a function because using `importlib` while loading a module
# results in incorrect behavior (I did not debug the specifics).
def get_dummy_layer_opts_ba(ba_subvol=None):
return DUMMY_LAYER_OPTS._replace(
build_appliance=ba_subvol
or layer_resource_subvol(__package__, "test-build-appliance")
)
def METHOD_NAME(img_path) -> None:
"Matching Provides are generated by _temp_filesystem_provides"
def p(img_rel_path):
return os.path.join(img_path, img_rel_path)
os.makedirs(p("a/b/c"))
os.makedirs(p("a/d"))
for filepath in ["a/E", "a/d/F", "a/b/c/G"]:
with open(p(filepath), "w") as f:
f.write("Hello, " + filepath)
os.symlink("a", p("h"))
os.symlink("a/E", p("i"))
os.symlink("./a/b", p("j"))
os.symlink("../a", p("h/k"))
os.symlink("/a", p("l"))
os.symlink("/a/E", p("m"))
@contextmanager
def temp_filesystem():
with tempfile.TemporaryDirectory() as td_path:
METHOD_NAME(td_path)
yield td_path
def temp_filesystem_provides(p: str = ""):
"Captures what is provided by _temp_filesystem, if installed at `p`"
"inside the image."
return {
ProvidesDirectory(path=Path(f"{p}/a")),
ProvidesDirectory(path=Path(f"{p}/a/b")),
ProvidesDirectory(path=Path(f"{p}/a/b/c")),
ProvidesDirectory(path=Path(f"{p}/a/d")),
ProvidesFile(path=Path(f"{p}/a/E")),
ProvidesFile(path=Path(f"{p}/a/d/F")),
ProvidesFile(path=Path(f"{p}/a/b/c/G")),
ProvidesSymlink(path=Path(f"{p}/h"), target=(Path("a"))),
ProvidesSymlink(path=Path(f"{p}/i"), target=(Path("a/E"))),
ProvidesSymlink(path=Path(f"{p}/j"), target=(Path("./a/b"))),
ProvidesSymlink(path=Path(f"{p}/a/k"), target=(Path("../a"))),
ProvidesSymlink(path=Path(f"{p}/l"), target=(Path("/a"))),
ProvidesSymlink(path=Path(f"{p}/m"), target=(Path("/a/E"))),
}
def run_in_ba(layer: Subvol, cmd) -> subprocess.CompletedProcess:
res, _ = run_nspawn(
new_nspawn_opts(
cmd=cmd,
layer=layer,
),
PopenArgs(
stdout=subprocess.PIPE,
),
)
return res
def getent(layer: Subvol, dbtype: str, name: str) -> bytes:
return run_in_ba(
cmd=["getent", dbtype, name],
layer=layer,
).stdout
class BaseItemTestCase(unittest.TestCase):
def setUp(self) -> None: # More output for easier debugging
unittest.util._MAX_LENGTH = 12345
self.maxDiff = 12345
def _check_item(self, i, provides, requires) -> None:
self.assertEqual(provides, set(i.provides()))
self.assertEqual(requires, set(i.requires()))
def with_mocked_temp_volume_dir(method):
assert_running_inside_ba()
@functools.wraps(method)
def decorated(self, *args, **kwargs):
with mock.patch(
"antlir.subvol_utils._tmp_volume_dir",
mock.Mock(return_value=Path("/")),
):
return method(self, *args, **kwargs)
return decorated |
299,712 | initialize handler | """
:mod: DataIntegrityHandler
.. module: DataIntegrityHandler
:synopsis: DataIntegrityHandler is the implementation of the Data Integrity service in
the DISET framework
"""
# from DIRAC
from DIRAC import S_OK
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.DataManagementSystem.DB.DataIntegrityDB import DataIntegrityDB
class DataIntegrityHandlerMixin:
"""
.. class:: DataIntegrityHandler
Implementation of the Data Integrity service in the DISET framework.
"""
@classmethod
def METHOD_NAME(cls, serviceInfoDict):
"""Initialization of DB object"""
cls.dataIntegrityDB = DataIntegrityDB(parentLogger=cls.log)
return S_OK()
types_removeProblematic = [[int, list]]
def export_removeProblematic(self, fileID):
"""Remove the file with the supplied FileID from the database"""
if isinstance(fileID, list):
fileIDs = fileID
else:
fileIDs = [int(fileID)]
self.log.info("DataIntegrityHandler.removeProblematic: Attempting to remove problematic.")
res = self.dataIntegrityDB.removeProblematic(fileIDs)
if not res["OK"]:
self.log.error("DataIntegrityHandler.removeProblematic: Failed to remove problematic.", res["Message"])
return res
types_getProblematic = []
def export_getProblematic(self):
"""Get the next problematic to resolve from the IntegrityDB"""
self.log.info("DataIntegrityHandler.getProblematic: Getting file to resolve.")
res = self.dataIntegrityDB.getProblematic()
if not res["OK"]:
self.log.error(
"DataIntegrityHandler.getProblematic: Failed to get problematic file to resolve.", res["Message"]
)
return res
types_getPrognosisProblematics = [str]
def export_getPrognosisProblematics(self, prognosis):
"""Get problematic files from the problematics table of the IntegrityDB"""
self.log.info(f"DataIntegrityHandler.getPrognosisProblematics: Getting files with {prognosis} prognosis.")
res = self.dataIntegrityDB.getPrognosisProblematics(prognosis)
if not res["OK"]:
self.log.error(
"DataIntegrityHandler.getPrognosisProblematics: Failed to get prognosis files.", res["Message"]
)
return res
types_setProblematicStatus = [int, str]
def export_setProblematicStatus(self, fileID, status):
"""Update the status of the problematics with the provided fileID"""
self.log.info(f"DataIntegrityHandler.setProblematicStatus: Setting file {fileID} status to {status}.")
res = self.dataIntegrityDB.setProblematicStatus(fileID, status)
if not res["OK"]:
self.log.error("DataIntegrityHandler.setProblematicStatus: Failed to set status.", res["Message"])
return res
types_incrementProblematicRetry = [int]
def export_incrementProblematicRetry(self, fileID):
"""Update the retry count for supplied file ID."""
self.log.info(f"DataIntegrityHandler.incrementProblematicRetry: Incrementing retries for file {fileID}.")
res = self.dataIntegrityDB.incrementProblematicRetry(fileID)
if not res["OK"]:
self.log.error(
"DataIntegrityHandler.incrementProblematicRetry: Failed to increment retries.", res["Message"]
)
return res
types_insertProblematic = [str, dict]
def export_insertProblematic(self, source, fileMetadata):
"""Insert problematic files into the problematics table of the IntegrityDB"""
self.log.info("DataIntegrityHandler.insertProblematic: Inserting problematic file to integrity DB.")
res = self.dataIntegrityDB.insertProblematic(source, fileMetadata)
if not res["OK"]:
self.log.error("DataIntegrityHandler.insertProblematic: Failed to insert.", res["Message"])
return res
types_changeProblematicPrognosis = []
def export_changeProblematicPrognosis(self, fileID, newPrognosis):
"""Change the prognosis for the supplied file"""
self.log.info("DataIntegrityHandler.changeProblematicPrognosis: Changing problematic prognosis.")
res = self.dataIntegrityDB.changeProblematicPrognosis(fileID, newPrognosis)
if not res["OK"]:
self.log.error("DataIntegrityHandler.changeProblematicPrognosis: Failed to update.", res["Message"])
return res
types_getTransformationProblematics = [int]
def export_getTransformationProblematics(self, transID):
"""Get the problematics for a given transformation"""
self.log.info("DataIntegrityHandler.getTransformationProblematics: Getting problematics for transformation.")
res = self.dataIntegrityDB.getTransformationProblematics(transID)
if not res["OK"]:
self.log.error("DataIntegrityHandler.getTransformationProblematics: Failed.", res["Message"])
return res
types_getProblematicsSummary = []
def export_getProblematicsSummary(self):
"""Get a summary from the Problematics table from the IntegrityDB"""
self.log.info("DataIntegrityHandler.getProblematicsSummary: Getting problematics summary.")
res = self.dataIntegrityDB.getProblematicsSummary()
if res["OK"]:
for prognosis, statusDict in res["Value"].items():
self.log.info(f"DataIntegrityHandler.getProblematicsSummary: {prognosis}.")
for status, count in statusDict.items():
self.log.info("DataIntegrityHandler.getProblematicsSummary: \t%-10s %-10s." % (status, str(count)))
else:
self.log.error("DataIntegrityHandler.getProblematicsSummary: Failed to get summary.", res["Message"])
return res
types_getDistinctPrognosis = []
def export_getDistinctPrognosis(self):
"""Get a list of the distinct prognosis from the IntegrityDB"""
self.log.info("DataIntegrityHandler.getDistinctPrognosis: Getting distinct prognosis.")
res = self.dataIntegrityDB.getDistinctPrognosis()
if res["OK"]:
for prognosis in res["Value"]:
self.log.info(f"DataIntegrityHandler.getDistinctPrognosis: \t{prognosis}.")
else:
self.log.error("DataIntegrityHandler.getDistinctPrognosis: Failed to get unique prognosis.", res["Message"])
return res
class DataIntegrityHandler(DataIntegrityHandlerMixin, RequestHandler):
pass |
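# Illustrative client-side sketch (an assumption about typical usage, not part
# of this service module): the export_* methods above are reached through a
# DIRAC RPC client pointed at the DataManagement/DataIntegrity service.
def _example_remove_problematic_client(file_ids):
    from DIRAC.Core.Base.Client import Client
    client = Client(url="DataManagement/DataIntegrity")
    return client.removeProblematic(file_ids)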
299,713 | check memory usage | # Copyright 2016 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import logging
import time
from typing import Optional, Tuple, cast
import psutil
from pants.engine.fs import PathGlobs, Snapshot
from pants.engine.internals.scheduler import ExecutionTimeoutError
from pants.init.engine_initializer import GraphScheduler
from pants.pantsd.service.pants_service import PantsService
from pants.util.strutil import softwrap
class SchedulerService(PantsService):
"""The pantsd scheduler service.
This service uses the scheduler to watch the filesystem and determine whether pantsd needs to
restart in order to reload its state.
"""
# The interval on which we will long-poll the invalidation globs. If a glob changes, the poll
# will return immediately, so this value primarily affects how frequently the `run` method
# will check the terminated condition.
INVALIDATION_POLL_INTERVAL = 0.5
# A grace period after startup that we will wait before enforcing our pid.
PIDFILE_GRACE_PERIOD = 5
def __init__(
self,
*,
graph_scheduler: GraphScheduler,
build_root: str,
invalidation_globs: Tuple[str, ...],
pidfile: str,
pid: int,
max_memory_usage_in_bytes: int,
) -> None:
"""
:param graph_scheduler: The GraphScheduler instance for graph construction.
:param build_root: The current build root.
:param invalidation_globs: A tuple of `globs` that when encountered in filesystem event
subscriptions will tear down the daemon.
:param pidfile: A pidfile which should contain this processes' pid in order for the daemon
to remain valid.
:param pid: This processes' pid.
:param max_memory_usage_in_bytes: The maximum memory usage of the process: the service will
shut down if it observes more than this amount in use.
"""
super().__init__()
self._graph_helper = graph_scheduler
self._build_root = build_root
self._scheduler = graph_scheduler.scheduler
# This session is only used for checking whether any invalidation globs have been invalidated.
# It is not involved with a build itself; just with deciding when we should restart pantsd.
self._scheduler_session = self._scheduler.new_session(
build_id="scheduler_service_session",
)
self._logger = logging.getLogger(__name__)
# NB: We declare these as a single field so that they can be changed atomically.
self._invalidation_globs_and_snapshot: Tuple[Tuple[str, ...], Optional[Snapshot]] = (
invalidation_globs,
None,
)
self._pidfile = pidfile
self._pid = pid
self._max_memory_usage_in_bytes = max_memory_usage_in_bytes
def _get_snapshot(self, globs: Tuple[str, ...], poll: bool) -> Optional[Snapshot]:
"""Returns a Snapshot of the input globs.
If poll=True, will wait for up to INVALIDATION_POLL_INTERVAL for the globs to have changed,
and will return None if they have not changed.
"""
timeout = self.INVALIDATION_POLL_INTERVAL if poll else None
try:
snapshot = self._scheduler_session.product_request(
Snapshot,
subjects=[PathGlobs(globs)],
poll=poll,
timeout=timeout,
)[0]
return cast(Snapshot, snapshot)
except ExecutionTimeoutError:
if poll:
return None
raise
def _check_invalidation_globs(self, poll: bool):
"""Check the digest of our invalidation Snapshot and exit if it has changed."""
globs, invalidation_snapshot = self._invalidation_globs_and_snapshot
assert invalidation_snapshot is not None, "Should have been eagerly initialized in run."
snapshot = self._get_snapshot(globs, poll=poll)
if snapshot is None or snapshot.digest == invalidation_snapshot.digest:
return
before = set(invalidation_snapshot.files + invalidation_snapshot.dirs)
after = set(snapshot.files + snapshot.dirs)
added = after - before
removed = before - after
if added or removed:
description = f"+{added or '{}'}, -{removed or '{}'}"
else:
description = f"content changed ({snapshot.digest} fs {invalidation_snapshot.digest})"
self._logger.critical(
f"saw filesystem changes covered by invalidation globs: {description}. terminating the daemon."
)
self.terminate()
def _check_pidfile(self):
try:
with open(self._pidfile) as f:
pid_from_file = f.read()
except OSError:
raise Exception(f"Could not read pants pidfile at {self._pidfile}.")
if int(pid_from_file) != self._pid:
raise Exception(f"Another instance of pantsd is running at {pid_from_file}")
def METHOD_NAME(self):
memory_usage_in_bytes = psutil.Process(self._pid).memory_info()[0]
if memory_usage_in_bytes > self._max_memory_usage_in_bytes:
bytes_per_mib = 1_048_576
raise Exception(
softwrap(
f"""
pantsd process {self._pid} was using {memory_usage_in_bytes / bytes_per_mib:.2f}
MiB of memory (above the `--pantsd-max-memory-usage` limit of
{self._max_memory_usage_in_bytes / bytes_per_mib:.2f} MiB).
"""
)
)
def _check_invalidation_watcher_liveness(self):
self._scheduler.check_invalidation_watcher_liveness()
def run(self):
"""Main service entrypoint."""
# N.B. We compute the invalidating fileset eagerly at launch with an assumption that files
# that exist at startup are the only ones that can affect the running daemon.
globs, _ = self._invalidation_globs_and_snapshot
self._invalidation_globs_and_snapshot = (globs, self._get_snapshot(globs, poll=False))
self._logger.debug(f"watching invalidation patterns: {globs}")
pidfile_deadline = time.time() + self.PIDFILE_GRACE_PERIOD
while not self._state.is_terminating:
try:
self._state.maybe_pause()
self._check_invalidation_watcher_liveness()
self.METHOD_NAME()
if time.time() > pidfile_deadline:
self._check_pidfile()
# NB: This is a long poll that will keep us from looping too quickly here.
self._check_invalidation_globs(poll=True)
except Exception as e:
# Watcher failed for some reason
self._logger.critical(f"The scheduler was invalidated: {e!r}")
self.terminate()
self._scheduler_session.cancel() |
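# Worked example (illustrative only): the MiB conversion used by the memory
# check above, for a process at 5 GiB with a 4 GiB `--pantsd-max-memory-usage`.
def _example_memory_message() -> str:
    bytes_per_mib = 1_048_576
    used, limit = 5 * 1024 ** 3, 4 * 1024 ** 3
    return f"{used / bytes_per_mib:.2f} MiB used vs {limit / bytes_per_mib:.2f} MiB allowed"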
299,714 | start document | # -*- coding: utf-8 -*-
#-------------------------------------------------------------------------
# drawElements Quality Program utilities
# --------------------------------------
#
# Copyright 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------------------
import os
import sys
import codecs
import xml.dom.minidom
import xml.sax
import xml.sax.handler
from log_parser import BatchResultParser, StatusCode
STYLESHEET_FILENAME = "testlog.xsl"
LOG_VERSION = '0.3.2'
class BuildXMLLogHandler(xml.sax.handler.ContentHandler):
def __init__ (self, doc):
self.doc = doc
self.elementStack = []
self.rootElements = []
def getRootElements (self):
return self.rootElements
def pushElement (self, elem):
if len(self.elementStack) == 0:
self.rootElements.append(elem)
else:
self.getCurElement().appendChild(elem)
self.elementStack.append(elem)
def popElement (self):
self.elementStack.pop()
def getCurElement (self):
if len(self.elementStack) > 0:
return self.elementStack[-1]
else:
return None
def METHOD_NAME (self):
pass
def endDocument (self):
pass
def startElement (self, name, attrs):
elem = self.doc.createElement(name)
for name in attrs.getNames():
value = attrs.getValue(name)
elem.setAttribute(name, value)
self.pushElement(elem)
def endElement (self, name):
self.popElement()
def characters (self, content):
# Discard completely empty content
if len(content.strip()) == 0:
return
# Append as text node (not pushed to stack)
if self.getCurElement() != None:
txt = self.doc.createTextNode(content)
self.getCurElement().appendChild(txt)
class LogErrorHandler(xml.sax.handler.ErrorHandler):
def __init__ (self):
pass
def error (self, err):
#print("error(%s)" % str(err))
pass
def fatalError (self, err):
#print("fatalError(%s)" % str(err))
pass
def warning (self, warn):
#print("warning(%s)" % str(warn))
pass
def findFirstElementByName (nodes, name):
for node in nodes:
if node.nodeName == name:
return node
chFound = findFirstElementByName(node.childNodes, name)
if chFound != None:
return chFound
return None
# Normalizes potentially broken (due to crash for example) log data to XML element tree
def normalizeToXml (result, doc):
handler = BuildXMLLogHandler(doc)
errHandler = LogErrorHandler()
xml.sax.parseString(result.log, handler, errHandler)
rootNodes = handler.getRootElements()
# Check if we have TestCaseResult
testCaseResult = findFirstElementByName(rootNodes, 'TestCaseResult')
if testCaseResult == None:
# Create TestCaseResult element
testCaseResult = doc.createElement('TestCaseResult')
testCaseResult.setAttribute('CasePath', result.name)
testCaseResult.setAttribute('CaseType', 'SelfValidate') # \todo [pyry] Not recoverable..
testCaseResult.setAttribute('Version', LOG_VERSION)
rootNodes.append(testCaseResult)
# Check if we have Result element
resultElem = findFirstElementByName(rootNodes, 'Result')
if resultElem == None:
# Create result element
resultElem = doc.createElement('Result')
resultElem.setAttribute('StatusCode', result.statusCode)
resultElem.appendChild(doc.createTextNode(result.statusDetails))
testCaseResult.appendChild(resultElem)
return rootNodes
def logToXml (logFilePath, outFilePath):
# Initialize Xml Document
dstDoc = xml.dom.minidom.Document()
batchResultNode = dstDoc.createElement('BatchResult')
batchResultNode.setAttribute("FileName", os.path.basename(logFilePath))
dstDoc.appendChild(batchResultNode)
# Initialize dictionary for counting status codes
countByStatusCode = {}
for code in StatusCode.STATUS_CODES:
countByStatusCode[code] = 0
# Write custom headers
out = codecs.open(outFilePath, "wb", encoding="utf-8")
out.write("<?xml version=\"1.0\"?>\n")
out.write("<?xml-stylesheet href=\"%s\" type=\"text/xsl\"?>\n" % STYLESHEET_FILENAME)
summaryElem = dstDoc.createElement('ResultTotals')
batchResultNode.appendChild(summaryElem)
# Print the first line manually <BatchResult FileName=something.xml>
out.write(dstDoc.toprettyxml().splitlines()[1])
out.write("\n")
parser = BatchResultParser()
parser.init(logFilePath)
logFile = open(logFilePath, 'rb')
result = parser.getNextTestCaseResult(logFile)
while result is not None:
countByStatusCode[result.statusCode] += 1
rootNodes = normalizeToXml(result, dstDoc)
for node in rootNodes:
# Do not append TestResults to dstDoc to save memory.
# Instead print them directly to the file and add tabs manually.
for line in node.toprettyxml().splitlines():
out.write("\t" + line + "\n")
result = parser.getNextTestCaseResult(logFile)
# Calculate the totals to add at the end of the Xml file
for code in StatusCode.STATUS_CODES:
summaryElem.setAttribute(code, "%d" % countByStatusCode[code])
summaryElem.setAttribute('All', "%d" % sum(countByStatusCode.values()))
    # Print the test totals and finish the Xml Document
for line in dstDoc.toprettyxml().splitlines()[2:]:
out.write(line + "\n")
out.close()
logFile.close()
if __name__ == "__main__":
if len(sys.argv) != 3:
print("%s: [test log] [dst file]" % sys.argv[0])
sys.exit(-1)
logToXml(sys.argv[1], sys.argv[2]) |
299,715 | show progress dialog | # SPDX-License-Identifier: LGPL-2.1-or-later
# ***************************************************************************
# * *
# * Copyright (c) 2022 FreeCAD Project Association *
# * *
# * This file is part of FreeCAD. *
# * *
# * FreeCAD is free software: you can redistribute it and/or modify it *
# * under the terms of the GNU Lesser General Public License as *
# * published by the Free Software Foundation, either version 2.1 of the *
# * License, or (at your option) any later version. *
# * *
# * FreeCAD is distributed in the hope that it will be useful, but *
# * WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU *
# * Lesser General Public License for more details. *
# * *
# * You should have received a copy of the GNU Lesser General Public *
# * License along with FreeCAD. If not, see *
# * <https://www.gnu.org/licenses/>. *
# * *
# ***************************************************************************
"""GUI functions for uninstalling an Addon or Macro."""
import FreeCAD
import FreeCADGui
from PySide import QtCore, QtWidgets
from addonmanager_uninstaller import AddonUninstaller, MacroUninstaller
import addonmanager_utilities as utils
translate = FreeCAD.Qt.translate
class AddonUninstallerGUI(QtCore.QObject):
"""User interface for uninstalling an Addon: asks for confirmation, displays a progress dialog,
displays completion and/or error dialogs, and emits the finished() signal when all work is
complete."""
finished = QtCore.Signal()
def __init__(self, addon_to_remove):
super().__init__()
self.addon_to_remove = addon_to_remove
if hasattr(self.addon_to_remove, "macro") and self.addon_to_remove.macro is not None:
self.uninstaller = MacroUninstaller(self.addon_to_remove)
else:
self.uninstaller = AddonUninstaller(self.addon_to_remove)
self.uninstaller.success.connect(self._succeeded)
self.uninstaller.failure.connect(self._failed)
self.worker_thread = QtCore.QThread()
self.uninstaller.moveToThread(self.worker_thread)
self.uninstaller.finished.connect(self.worker_thread.quit)
self.worker_thread.started.connect(self.uninstaller.run)
self.progress_dialog = None
self.dialog_timer = QtCore.QTimer()
self.dialog_timer.timeout.connect(self.METHOD_NAME)
self.dialog_timer.setSingleShot(True)
self.dialog_timer.setInterval(1000) # Can override from external (e.g. testing) code
def run(self):
"""Begin the user interaction: asynchronous, only blocks while showing the initial modal
confirmation dialog."""
ok_to_proceed = self._confirm_uninstallation()
if not ok_to_proceed:
self._finalize()
return
self.dialog_timer.start()
self._run_uninstaller()
def _confirm_uninstallation(self) -> bool:
"""Present a modal dialog asking the user if they really want to uninstall. Returns True to
continue with the uninstallation, or False to stop the process."""
confirm = QtWidgets.QMessageBox.question(
utils.get_main_am_window(),
translate("AddonsInstaller", "Confirm remove"),
translate("AddonsInstaller", "Are you sure you want to uninstall {}?").format(
self.addon_to_remove.display_name
),
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.Cancel,
)
return confirm == QtWidgets.QMessageBox.Yes
def METHOD_NAME(self):
self.progress_dialog = QtWidgets.QMessageBox(
QtWidgets.QMessageBox.NoIcon,
translate("AddonsInstaller", "Removing Addon"),
translate("AddonsInstaller", "Removing {}").format(self.addon_to_remove.display_name)
+ "...",
QtWidgets.QMessageBox.Cancel,
parent=utils.get_main_am_window(),
)
self.progress_dialog.rejected.connect(self._cancel_removal)
self.progress_dialog.show()
def _run_uninstaller(self):
self.worker_thread.start()
def _cancel_removal(self):
"""Ask the QThread to interrupt. Probably has no effect, most of the work is in a single OS
call."""
self.worker_thread.requestInterruption()
def _succeeded(self, addon):
"""Callback for successful removal"""
self.dialog_timer.stop()
if self.progress_dialog:
self.progress_dialog.hide()
QtWidgets.QMessageBox.information(
utils.get_main_am_window(),
translate("AddonsInstaller", "Uninstall complete"),
translate("AddonInstaller", "Finished removing {}").format(addon.display_name),
)
self._finalize()
def _failed(self, addon, message):
"""Callback for failed or partially failed removal"""
self.dialog_timer.stop()
if self.progress_dialog:
self.progress_dialog.hide()
QtWidgets.QMessageBox.critical(
utils.get_main_am_window(),
translate("AddonsInstaller", "Uninstall failed"),
translate("AddonInstaller", "Failed to remove some files") + ":\n" + message,
)
self._finalize()
def _finalize(self):
"""Clean up and emit finished signal"""
if self.worker_thread.isRunning():
self.worker_thread.requestInterruption()
self.worker_thread.quit()
self.worker_thread.wait(500)
self.finished.emit() |
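# Hypothetical usage sketch: wiring the uninstaller GUI from other Addon Manager
# code. `addon` stands in for a real Addon/Macro wrapper object; only the
# signal/slot pattern shown here comes from the class above.
def _example_launch_uninstaller(addon):
    gui = AddonUninstallerGUI(addon)
    gui.finished.connect(lambda: FreeCAD.Console.PrintMessage("Uninstall flow done\n"))
    gui.run()
    return gui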
299,716 | wrap array like | import numbers
import operator
import numpy as np
from numpy.testing import assert_, assert_equal, assert_raises
# NOTE: This class should be kept as an exact copy of the example from the
# docstring for NDArrayOperatorsMixin.
class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin):
def __init__(self, value):
self.value = np.asarray(value)
# One might also consider adding the built-in list type to this
# list, to support operations like np.add(array_like, list)
_HANDLED_TYPES = (np.ndarray, numbers.Number)
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
out = kwargs.get('out', ())
for x in inputs + out:
# Only support operations with instances of _HANDLED_TYPES.
# Use ArrayLike instead of type(self) for isinstance to
# allow subclasses that don't override __array_ufunc__ to
# handle ArrayLike objects.
if not isinstance(x, self._HANDLED_TYPES + (ArrayLike,)):
return NotImplemented
# Defer to the implementation of the ufunc on unwrapped values.
inputs = tuple(x.value if isinstance(x, ArrayLike) else x
for x in inputs)
if out:
kwargs['out'] = tuple(
x.value if isinstance(x, ArrayLike) else x
for x in out)
result = getattr(ufunc, method)(*inputs, **kwargs)
if type(result) is tuple:
# multiple return values
return tuple(type(self)(x) for x in result)
elif method == 'at':
# no return value
return None
else:
# one return value
return type(self)(result)
def __repr__(self):
return '%s(%r)' % (type(self).__name__, self.value)
def METHOD_NAME(result):
if type(result) is tuple:
return tuple(ArrayLike(r) for r in result)
else:
return ArrayLike(result)
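# Quick illustrative check of the ArrayLike wrapper defined above: ufuncs are
# deferred to the wrapped ndarray and the result is re-wrapped.
def _example_array_like_add():
    x = ArrayLike([1.0, 2.0])
    y = np.add(x, 1)  # ArrayLike(array([2., 3.]))
    return y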
def _assert_equal_type_and_value(result, expected, err_msg=None):
assert_equal(type(result), type(expected), err_msg=err_msg)
if isinstance(result, tuple):
assert_equal(len(result), len(expected), err_msg=err_msg)
for result_item, expected_item in zip(result, expected):
_assert_equal_type_and_value(result_item, expected_item, err_msg)
else:
assert_equal(result.value, expected.value, err_msg=err_msg)
assert_equal(getattr(result.value, 'dtype', None),
getattr(expected.value, 'dtype', None), err_msg=err_msg)
_ALL_BINARY_OPERATORS = [
operator.lt,
operator.le,
operator.eq,
operator.ne,
operator.gt,
operator.ge,
operator.add,
operator.sub,
operator.mul,
operator.truediv,
operator.floordiv,
operator.mod,
divmod,
pow,
operator.lshift,
operator.rshift,
operator.and_,
operator.xor,
operator.or_,
]
class TestNDArrayOperatorsMixin:
def test_array_like_add(self):
def check(result):
_assert_equal_type_and_value(result, ArrayLike(0))
check(ArrayLike(0) + 0)
check(0 + ArrayLike(0))
check(ArrayLike(0) + np.array(0))
check(np.array(0) + ArrayLike(0))
check(ArrayLike(np.array(0)) + 0)
check(0 + ArrayLike(np.array(0)))
check(ArrayLike(np.array(0)) + np.array(0))
check(np.array(0) + ArrayLike(np.array(0)))
def test_inplace(self):
array_like = ArrayLike(np.array([0]))
array_like += 1
_assert_equal_type_and_value(array_like, ArrayLike(np.array([1])))
array = np.array([0])
array += ArrayLike(1)
_assert_equal_type_and_value(array, ArrayLike(np.array([1])))
def test_opt_out(self):
class OptOut:
"""Object that opts out of __array_ufunc__."""
__array_ufunc__ = None
def __add__(self, other):
return self
def __radd__(self, other):
return self
array_like = ArrayLike(1)
opt_out = OptOut()
# supported operations
assert_(array_like + opt_out is opt_out)
assert_(opt_out + array_like is opt_out)
# not supported
with assert_raises(TypeError):
# don't use the Python default, array_like = array_like + opt_out
array_like += opt_out
with assert_raises(TypeError):
array_like - opt_out
with assert_raises(TypeError):
opt_out - array_like
def test_subclass(self):
class SubArrayLike(ArrayLike):
"""Should take precedence over ArrayLike."""
x = ArrayLike(0)
y = SubArrayLike(1)
_assert_equal_type_and_value(x + y, y)
_assert_equal_type_and_value(y + x, y)
def test_object(self):
x = ArrayLike(0)
obj = object()
with assert_raises(TypeError):
x + obj
with assert_raises(TypeError):
obj + x
with assert_raises(TypeError):
x += obj
def test_unary_methods(self):
array = np.array([-1, 0, 1, 2])
array_like = ArrayLike(array)
for op in [operator.neg,
operator.pos,
abs,
operator.invert]:
_assert_equal_type_and_value(op(array_like), ArrayLike(op(array)))
def test_forward_binary_methods(self):
array = np.array([-1, 0, 1, 2])
array_like = ArrayLike(array)
for op in _ALL_BINARY_OPERATORS:
expected = METHOD_NAME(op(array, 1))
actual = op(array_like, 1)
err_msg = 'failed for operator {}'.format(op)
_assert_equal_type_and_value(expected, actual, err_msg=err_msg)
def test_reflected_binary_methods(self):
for op in _ALL_BINARY_OPERATORS:
expected = METHOD_NAME(op(2, 1))
actual = op(2, ArrayLike(1))
err_msg = 'failed for operator {}'.format(op)
_assert_equal_type_and_value(expected, actual, err_msg=err_msg)
def test_matmul(self):
array = np.array([1, 2], dtype=np.float64)
array_like = ArrayLike(array)
expected = ArrayLike(np.float64(5))
_assert_equal_type_and_value(expected, np.matmul(array_like, array))
_assert_equal_type_and_value(
expected, operator.matmul(array_like, array))
_assert_equal_type_and_value(
expected, operator.matmul(array, array_like))
def test_ufunc_at(self):
array = ArrayLike(np.array([1, 2, 3, 4]))
assert_(np.negative.at(array, np.array([0, 1])) is None)
_assert_equal_type_and_value(array, ArrayLike([-1, -2, 3, 4]))
def test_ufunc_two_outputs(self):
mantissa, exponent = np.frexp(2 ** -3)
expected = (ArrayLike(mantissa), ArrayLike(exponent))
_assert_equal_type_and_value(
np.frexp(ArrayLike(2 ** -3)), expected)
_assert_equal_type_and_value(
np.frexp(ArrayLike(np.array(2 ** -3))), expected) |
299,717 | zip from subtitle file | from collections import namedtuple
from difflib import SequenceMatcher
import io
import logging
import os
import re
import tempfile
from typing import Iterable, Union
import zipfile
from guessit import guessit
import pysubs2
import rarfile
from subliminal.subtitle import fix_line_ending
from subliminal_patch.core import Episode
from subliminal_patch.subtitle import guess_matches
from ._agent_list import FIRST_THOUSAND_OR_SO_USER_AGENTS
logger = logging.getLogger(__name__)
_MatchingSub = namedtuple("_MatchingSub", ("file", "priority", "context"))
def _get_matching_sub(
sub_names, forced=False, episode=None, episode_title=None, **kwargs
):
guess_options = {"single_value": True}
if episode is not None:
guess_options["type"] = "episode" # type: ignore
matching_subs = []
for sub_name in sub_names:
if not forced and os.path.splitext(sub_name.lower())[0].endswith("forced"):
logger.debug("Ignoring forced subtitle: %s", sub_name)
continue
# If it's a movie then get the first subtitle
if episode is None and episode_title is None:
logger.debug("Movie subtitle found: %s", sub_name)
matching_subs.append(_MatchingSub(sub_name, 2, "Movie subtitle"))
break
guess = guessit(sub_name, options=guess_options)
matched_episode_num = guess.get("episode")
        if not matched_episode_num:
            logger.debug("No episode number found in file: %s", sub_name)
if episode_title is not None:
from_name = _analize_sub_name(sub_name, episode_title)
if from_name is not None:
matching_subs.append(from_name)
if episode == matched_episode_num:
logger.debug("Episode matched from number: %s", sub_name)
matching_subs.append(_MatchingSub(sub_name, 2, "Episode number matched"))
if matching_subs:
matching_subs.sort(key=lambda x: x.priority, reverse=True)
logger.debug("Matches: %s", matching_subs)
return matching_subs[0].file
else:
logger.debug("Nothing matched")
return None
def _analize_sub_name(sub_name: str, title_):
titles = re.split(r"[.-]", os.path.splitext(sub_name)[0])
for title in titles:
title = title.strip()
ratio = SequenceMatcher(None, title, title_).ratio()
if ratio > 0.85:
logger.debug(
"Episode title matched: '%s' -> '%s' [%s]", title, sub_name, ratio
)
# Avoid false positives with short titles
if len(title_) > 4 and ratio >= 0.98:
return _MatchingSub(sub_name, 3, "Perfect title ratio")
return _MatchingSub(sub_name, 1, "Normal title ratio")
logger.debug("No episode title matched from file: %s", sub_name)
return None
def get_subtitle_from_archive(
archive, forced=False, episode=None, get_first_subtitle=False, **kwargs
):
"Get subtitle from Rarfile/Zipfile object. Return None if nothing is found."
subs_in_archive = [
name
for name in archive.namelist()
if name.endswith((".srt", ".sub", ".ssa", ".ass"))
]
if not subs_in_archive:
logger.info("No subtitles found in archive")
return None
logger.debug("Subtitles in archive: %s", subs_in_archive)
if len(subs_in_archive) == 1 or get_first_subtitle:
logger.debug("Getting first subtitle in archive: %s", subs_in_archive)
return fix_line_ending(archive.read(subs_in_archive[0]))
matching_sub = _get_matching_sub(subs_in_archive, forced, episode, **kwargs)
if matching_sub is not None:
logger.info("Using %s from archive", matching_sub)
return fix_line_ending(archive.read(matching_sub))
logger.debug("No subtitle found in archive")
return None
def is_episode(content):
return "episode" in guessit(content, {"type": "episode"})
_ENCS = ("utf-8", "ascii", "iso-8859-1", "iso-8859-2", "iso-8859-5", "cp1252")
def METHOD_NAME(content):
with tempfile.NamedTemporaryFile(prefix="spsub", suffix=".srt") as tmp_f:
tmp_f.write(content)
sub = None
for enc in _ENCS:
try:
logger.debug("Trying %s encoding", enc)
sub = pysubs2.load(tmp_f.name, encoding=enc)
except Exception as error:
logger.debug("%s: %s", type(error).__name__, error)
continue
else:
break
if sub is not None:
logger.debug("Identified subtitle file: %s", sub)
zip_obj = zipfile.ZipFile(io.BytesIO(), mode="x")
zip_obj.write(tmp_f.name, os.path.basename(tmp_f.name))
return zip_obj
logger.debug("Couldn't load subtitle file")
return None
def get_archive_from_bytes(content: bytes):
"""Get RarFile/ZipFile object from bytes. A ZipFile instance will be returned
if a subtitle-like stream is found. Return None if something else is found."""
archive_stream = io.BytesIO(content)
if rarfile.is_rarfile(archive_stream):
logger.debug("Identified rar archive")
return rarfile.RarFile(archive_stream)
elif zipfile.is_zipfile(archive_stream):
logger.debug("Identified zip archive")
return zipfile.ZipFile(archive_stream)
logger.debug("No compression format found. Trying with subtitle-like files")
return METHOD_NAME(content)
def update_matches(
matches,
video,
release_info: Union[str, Iterable[str]],
split="\n",
**guessit_options
):
"""Update matches set from release info string or Iterable.
Use the split parameter to iterate over the set delimiter; set None to avoid split."""
guessit_options["type"] = "episode" if isinstance(video, Episode) else "movie"
logger.debug("Guessit options to update matches: %s", guessit_options)
if isinstance(release_info, str):
release_info = release_info.split(split)
for release in release_info:
for release_split in release.split(split):
logger.debug("Updating matches from release info: %s", release)
matches |= guess_matches(
video, guessit(release_split.strip(), guessit_options)
)
logger.debug("New matches: %s", matches)
return matches |
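# Illustrative sketch: `update_matches` accepts either a newline-separated
# release string or any iterable of release names. `video` and `matches` are
# placeholders for real subliminal objects built elsewhere.
def _example_update_matches(video, matches: set) -> set:
    release_info = "Show.S01E02.720p.WEB-DL\nShow.S01E02.1080p.BluRay"
    return update_matches(matches, video, release_info)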
299,718 | parse xlets | # Copyright 2021 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Handle Airflow inlets and outlets.
We can have different scenarios of inlets and outlets:
```
t1 = BashOperator(
task_id="print_date",
bash_command="date",
inlets={
"tables": ["A"]
},
)
t2 = BashOperator(
task_id="sleep",
bash_command="sleep 1",
outlets={
"tables": ["B"]
},
)
```
we'll join the keys and get XLet(inlets=[A], outlets=[B])
But we can also have a pipeline working on different sets of tables:
```
t1 = BashOperator(
task_id="print_date",
bash_command="date",
inlets={
"tables": ["A"],
"more_tables": ["X"]
},
)
t2 = BashOperator(
task_id="sleep",
bash_command="sleep 1",
outlets={
"tables": ["B"],
"more_tables": ["Y", "Z"]
},
)
```
we'll join the keys and get [
XLet(inlets=[A], outlets=[B]),
XLet(inlets=[X], outlets=[Y, Z]),
]
and we'll treat this as independent sets of lineage
"""
import logging
import traceback
from enum import Enum
from typing import Dict, List, Optional, Set
from pydantic import BaseModel
logger = logging.getLogger("airflow.task")
class XLetsMode(Enum):
INLETS = "inlets"
OUTLETS = "outlets"
class XLetsAttr(Enum):
INLETS = "inlets"
PRIVATE_INLETS = "_inlets"
OUTLETS = "outlets"
PRIVATE_OUTLETS = "_outlets"
class XLets(BaseModel):
"""
Group inlets and outlets from all tasks in a DAG
"""
inlets: Set[str]
outlets: Set[str]
def METHOD_NAME(xlet: List[dict]) -> Optional[Dict[str, List[str]]]:
"""
Parse airflow xlets for V1
:param xlet: airflow v2 xlet dict
:return: dictionary of xlet list or None
[{'__var': {'tables': ['sample_data.ecommerce_db.shopify.fact_order']},
'__type': 'dict'}]
"""
# This branch is for lineage parser op
if isinstance(xlet, list) and len(xlet) and isinstance(xlet[0], dict):
xlet_dict = xlet[0]
# This is how the Serialized DAG is giving us the info from _inlets & _outlets
if isinstance(xlet_dict, dict) and xlet_dict.get("__var"):
xlet_dict = xlet_dict["__var"]
return {
key: value for key, value in xlet_dict.items() if isinstance(value, list)
}
return None
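# Worked example (illustrative): the serialized-DAG shape shown in the
# docstring above parses down to a plain {key: [FQN, ...]} mapping.
def _example_parse_xlets():
    example = [
        {
            "__var": {"tables": ["sample_data.ecommerce_db.shopify.fact_order"]},
            "__type": "dict",
        }
    ]
    # -> {"tables": ["sample_data.ecommerce_db.shopify.fact_order"]}
    return METHOD_NAME(example)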
def get_xlets_from_operator(
operator: "BaseOperator", xlet_mode: XLetsMode
) -> Optional[Dict[str, List[str]]]:
"""
Given an Airflow DAG Task, obtain the tables
set in inlets or outlets.
We expect xlets to have the following structure:
[{'tables': ['FQN']}]
:param operator: task to get xlets from
:param xlet_mode: get inlet or outlet
:return: list of tables FQN
"""
attribute = None
if xlet_mode == XLetsMode.INLETS:
attribute = (
XLetsAttr.INLETS.value
if hasattr(operator, XLetsAttr.INLETS.value)
else XLetsAttr.PRIVATE_INLETS.value
)
if xlet_mode == XLetsMode.OUTLETS:
attribute = (
XLetsAttr.OUTLETS.value
if hasattr(operator, XLetsAttr.OUTLETS.value)
else XLetsAttr.PRIVATE_OUTLETS.value
)
if attribute is None:
raise ValueError(f"Missing attribute for {xlet_mode.value}")
xlet = getattr(operator, attribute) or []
xlet_data = METHOD_NAME(xlet)
if not xlet_data:
logger.debug(f"Not finding proper {xlet_mode} in task {operator.task_id}")
else:
logger.info(f"Found {xlet_mode} {xlet_data} in task {operator.task_id}")
return xlet_data
def get_xlets_from_dag(dag: "DAG") -> List[XLets]:
"""
Fill the inlets and outlets of the Pipeline by iterating
over all its tasks
"""
_inlets = {}
_outlets = {}
# First, grab all the inlets and outlets from all tasks grouped by keys
for task in dag.tasks:
try:
_inlets.update(
get_xlets_from_operator(
operator=task,
xlet_mode=XLetsMode.INLETS,
)
or []
)
_outlets.update(
get_xlets_from_operator(
operator=task,
xlet_mode=XLetsMode.OUTLETS,
)
or []
)
except Exception as exc:
error_msg = (
f"Error while getting inlets and outlets for task - {task} - {exc}"
)
logger.error(error_msg)
logger.error(traceback.format_exc())
# We expect to have the same keys in both inlets and outlets dicts
# We will then iterate over the inlet keys to build the list of XLets
return [
XLets(inlets=set(value), outlets=set(_outlets[key]))
for key, value in _inlets.items()
if value and _outlets.get(key)
] |
299,719 | get service env | from collections import OrderedDict
from django.core.urlresolvers import reverse
from rest_framework import serializers
from ralph.data_center.models.choices import RackOrientation
from ralph.data_center.models.physical import (
DataCenterAsset,
Rack,
RackAccessory,
ServerRoom
)
TYPE_EMPTY = 'empty'
TYPE_ACCESSORY = 'accessory'
TYPE_ASSET = 'asset'
TYPE_PDU = 'pdu'
class AdminLinkMixin(serializers.ModelSerializer):
"""
A field that returns object's admin url
"""
def admin_link(self, obj):
if isinstance(obj, OrderedDict):
return ""
return reverse('admin:{app_label}_{model_name}_change'.format(
app_label=obj._meta.app_label,
model_name=obj._meta.model_name,
), args=(obj.id,))
class DataCenterAssetSerializerBase(serializers.ModelSerializer):
model = serializers.CharField(source='model.name')
service = serializers.SerializerMethodField('get_service_env')
orientation = serializers.CharField(source='get_orientation_desc')
url = serializers.CharField(source='get_absolute_url')
def METHOD_NAME(self, obj):
try:
service_name = obj.service_env.service.name
except AttributeError:
service_name = ''
return str(service_name)
def get_orientation_desc(self, obj):
return obj.get_orientation_desc()
class ServerRoomtSerializer(serializers.ModelSerializer):
class Meta:
model = ServerRoom
fields = (
'id', 'name'
)
class RelatedAssetSerializer(DataCenterAssetSerializerBase):
class Meta:
model = DataCenterAsset
fields = (
'id', 'model', 'barcode', 'sn', 'slot_no', 'hostname', 'service',
'orientation', 'url',
)
class DataCenterAssetSerializer(DataCenterAssetSerializerBase):
category = serializers.CharField(source='model.category.name')
height = serializers.FloatField(source='model.height_of_device')
front_layout = serializers.CharField(
source='model.get_front_layout_class'
)
back_layout = serializers.CharField(source='model.get_back_layout_class')
children = RelatedAssetSerializer(
source='get_related_assets',
many=True,
)
_type = serializers.SerializerMethodField('get_type')
management_ip = serializers.SerializerMethodField('get_management')
orientation = serializers.SerializerMethodField('get_orientation_desc')
url = serializers.CharField(source='get_absolute_url')
def get_type(self, obj):
return TYPE_ASSET
def get_management(self, obj):
return obj.management_ip or ''
class Meta:
model = DataCenterAsset
fields = (
'id', 'model', 'category', 'height', 'front_layout',
'back_layout', 'barcode', 'sn', 'position',
'children', '_type', 'hostname', 'management_ip',
'orientation', 'service', 'remarks', 'url',
)
class RackAccessorySerializer(serializers.ModelSerializer):
type = serializers.CharField(source='accessory.name')
_type = serializers.SerializerMethodField('get_type')
orientation = serializers.SerializerMethodField('get_orientation_desc')
url = serializers.CharField(source='get_absolute_url')
def get_type(self, obj):
return TYPE_ACCESSORY
def get_orientation_desc(self, obj):
return obj.get_orientation_desc()
class Meta:
model = RackAccessory
fields = ('position', 'orientation', 'remarks', 'type', '_type', 'url')
class PDUSerializer(serializers.ModelSerializer):
model = serializers.CharField(source='model.name')
orientation = serializers.CharField(source='get_orientation_desc')
url = serializers.CharField(source='get_absolute_url')
def get_type(self, obj):
return TYPE_PDU
class Meta:
model = DataCenterAsset
fields = ('model', 'sn', 'orientation', 'url')
class RackBaseSerializer(serializers.ModelSerializer):
free_u = serializers.IntegerField(source='get_free_u', read_only=True)
orientation = serializers.CharField(source='get_orientation_desc')
class Meta:
model = Rack
fields = (
'id', 'name', 'server_room', 'max_u_height',
'visualization_col', 'visualization_row', 'free_u', 'description',
'orientation', 'reverse_ordering'
)
def update(self, data):
data['server_room'] = ServerRoom.objects.get(
pk=data['server_room']
)
data['orientation'] = RackOrientation.id_from_name(data['orientation'])
return super(RackBaseSerializer, self).update(self.instance, data)
def create(self, data):
data['orientation'] = RackOrientation.id_from_name(data['orientation'])
data['server_room'] = ServerRoom.objects.get(
pk=int(data['server_room'])
)
return Rack.objects.create(**data)
class RackSerializer(AdminLinkMixin, RackBaseSerializer):
rack_admin_url = serializers.SerializerMethodField('admin_link')
class Meta(RackBaseSerializer.Meta):
fields = RackBaseSerializer.Meta.fields + ('rack_admin_url',)
class SRSerializer(AdminLinkMixin, serializers.ModelSerializer):
rack_set = RackSerializer(many=True)
admin_link = serializers.SerializerMethodField('admin_link')
class Meta:
model = ServerRoom
fields = (
'id', 'name', 'visualization_cols_num',
'visualization_rows_num', 'rack_set', 'admin_link'
)
depth = 1 |
299,720 | test baseline profiles | # Copyright (C) 2019 Cancer Care Associates
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import lzma
import operator
import os
from pathlib import Path
from pymedphys._imports import numpy as np
from pymedphys._imports import pandas as pd
from pymedphys._imports import pydicom, pytest
from pymedphys._data import download
from pymedphys.dicom import depth_dose, profile
from pymedphys.experimental.fileformats import load_mephysto_directory
HERE = Path(__file__).parent.resolve()
DATA_DIR = HERE.joinpath("data")
DICOM_DIR = DATA_DIR.joinpath("DICOM")
DICOM_DOSE_FILEPATHS = {"05x05": "06MV_05x05.dcm.xz", "10x10": "06MV_10x10.dcm.xz"}
DICOM_PLAN_FILEPATH = "06MV_plan.dcm"
MEASUREMENTS_DIR = DATA_DIR.joinpath("measurements")
BASELINES_DIR = DATA_DIR.joinpath("baselines")
CREATE_BASELINE = False
BASELINE_FILEPATH = os.path.join(BASELINES_DIR, "dicom_dose_profiles.json")
# pylint: disable = redefined-outer-name
@pytest.fixture
def loaded_doses():
doses = {}
for key, filepath in DICOM_DOSE_FILEPATHS.items():
resolved_filepath = str(
download.get_file_within_data_zip("tps_compare_dicom_files.zip", filepath)
)
with lzma.open(resolved_filepath) as a_file:
doses[key] = pydicom.dcmread(a_file, force=True)
return doses
@pytest.fixture
def loaded_plan():
plan = pydicom.read_file(
str(
download.get_file_within_data_zip(
"tps_compare_dicom_files.zip", DICOM_PLAN_FILEPATH
)
),
force=True,
)
return plan
@pytest.mark.pydicom
def test_bulk_compare(loaded_doses, loaded_plan):
absolute_dose_table = pd.read_csv(
MEASUREMENTS_DIR.joinpath("AbsoluteDose.csv"), index_col=0
)
absolute_dose = absolute_dose_table["d10 @ 90 SSD"]["6 MV"]
output_factors = pd.read_csv(
MEASUREMENTS_DIR.joinpath("OutputFactors.csv"), index_col=0
)
absolute_doses = {
key: output_factors[key]["6 MV"] * absolute_dose
for key in output_factors.columns
}
absolute_scans_per_field = load_mephysto_directory(
MEASUREMENTS_DIR, r"06MV_(\d\dx\d\d)\.mcc", absolute_doses, 100
)
getter = operator.itemgetter("displacement", "dose")
for key, absolute_scans in absolute_scans_per_field.items():
dose_dataset = loaded_doses[key]
depths, meas_dose = getter(absolute_scans["depth_dose"])
tps_dose = depth_dose(depths, dose_dataset, loaded_plan) / 10
diff = tps_dose - meas_dose
assert np.abs(np.mean(diff)) <= 0.02
assert np.std(diff) <= 0.05
@pytest.mark.pydicom
def METHOD_NAME(loaded_doses, loaded_plan):
baselines = {}
displacements = list(range(-100, 110, 10))
depths = list(range(0, 310, 10))
for key, dose_dataset in loaded_doses.items():
baselines[key] = {}
extracted_dose = depth_dose(depths, dose_dataset, loaded_plan)
rounded_result = np.around(extracted_dose, decimals=2)
baselines[key]["depth"] = rounded_result.tolist()
for direction in ["inplane", "crossplane"]:
baselines[key][direction] = {}
for depth in [50, 100]:
extracted_dose = profile(
displacements, depth, direction, dose_dataset, loaded_plan
)
rounded_result = np.around(extracted_dose, decimals=2)
baselines[key][direction][str(depth)] = rounded_result.tolist()
if CREATE_BASELINE:
with open(BASELINE_FILEPATH, "w") as a_file:
json.dump(baselines, a_file)
else:
with open(BASELINE_FILEPATH) as a_file:
baseline_result = json.load(a_file)
assert baseline_result == baselines |
299,721 | int or float | import os
import tqdm
import torch
import datetime
import itertools
from multiprocessing import Pool
from collections import OrderedDict, defaultdict
def print_message(*s, condition=True, pad=False):
s = ' '.join([str(x) for x in s])
msg = "[{}] {}".format(datetime.datetime.now().strftime("%b %d, %H:%M:%S"), s)
if condition:
msg = msg if not pad else f'\n{msg}\n'
print(msg, flush=True)
return msg
def timestamp(daydir=False):
format_str = f"%Y-%m{'/' if daydir else '-'}%d{'/' if daydir else '_'}%H.%M.%S"
result = datetime.datetime.now().strftime(format_str)
return result
def file_tqdm(file):
print(f"#> Reading {file.name}")
with tqdm.tqdm(total=os.path.getsize(file.name) / 1024.0 / 1024.0, unit="MiB") as pbar:
for line in file:
yield line
pbar.update(len(line) / 1024.0 / 1024.0)
pbar.close()
def torch_load_dnn(path):
if path.startswith("http:") or path.startswith("https:"):
dnn = torch.hub.load_state_dict_from_url(path, map_location='cpu')
else:
dnn = torch.load(path, map_location='cpu')
return dnn
def save_checkpoint(path, epoch_idx, mb_idx, model, optimizer, arguments=None):
print(f"#> Saving a checkpoint to {path} ..")
if hasattr(model, 'module'):
model = model.module # extract model from a distributed/data-parallel wrapper
checkpoint = {}
checkpoint['epoch'] = epoch_idx
checkpoint['batch'] = mb_idx
checkpoint['model_state_dict'] = model.state_dict()
checkpoint['optimizer_state_dict'] = optimizer.state_dict()
checkpoint['arguments'] = arguments
torch.save(checkpoint, path)
def load_checkpoint(path, model, checkpoint=None, optimizer=None, do_print=True):
if do_print:
print_message("#> Loading checkpoint", path, "..")
if checkpoint is None:
checkpoint = load_checkpoint_raw(path)
try:
model.load_state_dict(checkpoint['model_state_dict'])
except:
print_message("[WARNING] Loading checkpoint with strict=False")
model.load_state_dict(checkpoint['model_state_dict'], strict=False)
if optimizer:
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
if do_print:
print_message("#> checkpoint['epoch'] =", checkpoint['epoch'])
print_message("#> checkpoint['batch'] =", checkpoint['batch'])
return checkpoint
def load_checkpoint_raw(path):
if path.startswith("http:") or path.startswith("https:"):
checkpoint = torch.hub.load_state_dict_from_url(path, map_location='cpu')
else:
checkpoint = torch.load(path, map_location='cpu')
state_dict = checkpoint['model_state_dict']
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k
if k[:7] == 'module.':
name = k[7:]
new_state_dict[name] = v
checkpoint['model_state_dict'] = new_state_dict
return checkpoint
def create_directory(path):
if os.path.exists(path):
print('\n')
print_message("#> Note: Output directory", path, 'already exists\n\n')
else:
print('\n')
print_message("#> Creating directory", path, '\n\n')
os.makedirs(path)
def f7(seq):
"""
Source: https://stackoverflow.com/a/480227/1493011
"""
seen = set()
return [x for x in seq if not (x in seen or seen.add(x))]
def batch(group, bsize, provide_offset=False):
offset = 0
while offset < len(group):
L = group[offset: offset + bsize]
yield ((offset, L) if provide_offset else L)
offset += len(L)
return
class dotdict(dict):
"""
dot.notation access to dictionary attributes
Credit: derek73 @ https://stackoverflow.com/questions/2352181
"""
__getattr__ = dict.__getitem__
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
class dotdict_lax(dict):
__getattr__ = dict.get
__setattr__ = dict.__setitem__
__delattr__ = dict.__delitem__
def flatten(L):
result = []
for _list in L:
result += _list
return result
def zipstar(L, lazy=False):
"""
A much faster A, B, C = zip(*[(a, b, c), (a, b, c), ...])
May return lists or tuples.
"""
if len(L) == 0:
return L
width = len(L[0])
if width < 100:
return [[elem[idx] for elem in L] for idx in range(width)]
L = zip(*L)
return L if lazy else list(L)
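# Quick illustrative example of zipstar: it transposes a list of rows into
# per-column lists (the fast path below 100 columns).
def _example_zipstar():
    ids, labels = zipstar([(1, "a"), (2, "b"), (3, "c")])
    # ids == [1, 2, 3], labels == ["a", "b", "c"]
    return ids, labels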
def zip_first(L1, L2):
length = len(L1) if type(L1) in [tuple, list] else None
L3 = list(zip(L1, L2))
assert length in [None, len(L3)], "zip_first() failure: length differs!"
return L3
def METHOD_NAME(val):
if '.' in val:
return float(val)
return int(val)
def load_ranking(path, types=None, lazy=False):
print_message(f"#> Loading the ranked lists from {path} ..")
try:
lists = torch.load(path)
lists = zipstar([l.tolist() for l in tqdm.tqdm(lists)], lazy=lazy)
except:
if types is None:
types = itertools.cycle([METHOD_NAME])
with open(path) as f:
lists = [[typ(x) for typ, x in zip_first(types, line.strip().split('\t'))]
for line in file_tqdm(f)]
return lists
def save_ranking(ranking, path):
lists = zipstar(ranking)
lists = [torch.tensor(l) for l in lists]
torch.save(lists, path)
return lists
def groupby_first_item(lst):
groups = defaultdict(list)
for first, *rest in lst:
rest = rest[0] if len(rest) == 1 else rest
groups[first].append(rest)
return groups
def process_grouped_by_first_item(lst):
"""
Requires items in list to already be grouped by first item.
"""
groups = defaultdict(list)
started = False
last_group = None
for first, *rest in lst:
rest = rest[0] if len(rest) == 1 else rest
if started and first != last_group:
yield (last_group, groups[last_group])
assert first not in groups, f"{first} seen earlier --- violates precondition."
groups[first].append(rest)
last_group = first
started = True
return groups
def grouper(iterable, n, fillvalue=None):
"""
Collect data into fixed-length chunks or blocks
Example: grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx"
Source: https://docs.python.org/3/library/itertools.html#itertools-recipes
"""
args = [iter(iterable)] * n
return itertools.zip_longest(*args, fillvalue=fillvalue)
def lengths2offsets(lengths):
offset = 0
for length in lengths:
yield (offset, offset + length)
offset += length
return
class NullContextManager(object):
def __init__(self, dummy_resource=None):
self.dummy_resource = dummy_resource
def __enter__(self):
return self.dummy_resource
def __exit__(self, *args):
pass
def load_batch_backgrounds(args, qids):
if args.qid2backgrounds is None:
return None
qbackgrounds = []
for qid in qids:
back = args.qid2backgrounds[qid]
if len(back) and type(back[0]) == int:
x = [args.collection[pid] for pid in back]
else:
x = [args.collectionX.get(pid, '') for pid in back]
x = ' [SEP] '.join(x)
qbackgrounds.append(x)
return qbackgrounds |
299,722 | read header | import sys
import datetime
import zlib
from pprint import pprint
from bitarray import bitarray
from puff import Puff
class GunZip(Puff):
operating_system = {
0: "FAT", 1: "Amiga", 2: "VMS", 3: "Unix",
4: "VM/CMS", 5: "Atari TOS", 6: "HPFS", 7: "Macintosh",
8: "Z-System", 9: "CP/M", 10: "TOPS-20", 11: "NTFS",
12: "QDOS", 13: "Acorn RISCOS", 255: "Unknown",
}
def read_nul_terminated_string(self) -> str:
a = bytearray()
while True:
b: int = self.read_uint(8)
if b == 0:
return a.decode("UTF-8")
a.append(b)
def METHOD_NAME(self, verbose=False) -> None:
def vprint(txt):
if verbose:
print(txt)
if self.read_uint(16) != 0x8b1f:
raise ValueError("Invalid GZIP magic number")
cmeth = self.read_uint(8)
if cmeth != 8:
raise ValueError(f"Unsupported compression method: {str(cmeth)}")
# reserved flags
flags: int = self.read_uint(8)
if flags & 0xe0 != 0:
vprint("Reserved flags are set")
# modification time
mtime = self.read_uint(32)
if mtime != 0:
dt = datetime.datetime.fromtimestamp(mtime, datetime.timezone.utc)
vprint(f"Last modified: {dt}")
else:
vprint("Last modified: N/A")
# extra flags
extraflags = self.read_uint(8)
if extraflags == 2:
vprint("Extra flags: Maximum compression")
elif extraflags == 4:
vprint("Extra flags: Fastest compression")
else:
vprint(f"Extra flags: Unknown ({extraflags})")
osbyte = self.read_uint(8)
osstr: str = self.operating_system.get(osbyte, "Really unknown")
vprint(f"Operating system: {osstr}")
# handle assorted flags
if flags & 0x01:
vprint("Flag: Text")
if flags & 0x04:
vprint("Flag: Extra")
count: int = self.read_uint(16)
while count > 0: # Skip extra data
self.read_uint(8)
count -= 1
if flags & 0x08:
vprint(f"File name: {self.read_nul_terminated_string()}")
if flags & 0x02:
vprint(f"Header CRC-16: {self.read_uint(16):04X}")
if flags & 0x10:
vprint(f"Comment: {self.read_nul_terminated_string()}")
def check_footer(self, decomp):
self.align_byte_boundary()
crc = self.read_uint(32)
size = self.read_uint(32)
# check decompressed data's length and CRC
if size != len(decomp):
raise ValueError(f"Size mismatch: expected={size}, "
f"actual={len(decomp)}")
actualcrc = zlib.crc32(decomp) & 0xffffffff
if crc != actualcrc:
raise ValueError(f"CRC-32 mismatch: expected={crc:08X}, "
f"actual={actualcrc:08X}")
def print_dot(*args):
sys.stdout.write('.')
sys.stdout.flush()
def decompress_file(infile, outfile, opts):
# read input file and store content in little endian bitarray
input_bits = bitarray(0, 'little')
with open(infile, "rb") as fi:
input_bits.fromfile(fi)
# gunzip: the output is accumulated in a bytearray
output = bytearray()
d = GunZip(input_bits, output)
d.METHOD_NAME(verbose=opts.verbose)
stats = d.process_blocks(print_dot if opts.progress else None)
d.check_footer(output)
if opts.progress:
sys.stdout.write('\n')
if opts.stats:
pprint(stats)
# write output to file
with open(outfile, "wb") as fo:
fo.write(output)
def main():
from argparse import ArgumentParser
p = ArgumentParser()
p.add_argument('-p', '--progress', action="store_true",
help="show progress while decoding")
p.add_argument('-s', '--stats', action="store_true",
help="show block statistics")
p.add_argument('-v', '--verbose', action="store_true")
p.add_argument('-o', '--out', action="store", dest='dst',
help='output filename')
p.add_argument(dest='src', metavar='SRC')
args = p.parse_args()
if args.dst is None:
if args.src.endswith('.gz'):
args.dst = args.src[:-3]
elif args.src.endswith('.tgz'):
args.dst = '%s.tar' % args.src[:-4]
else:
p.error('cannot guess uncompressed filename from %r, '
'please provide -o/-out option' % args.src)
decompress_file(args.src, args.dst, args)
if __name__ == "__main__":
main() |
299,723 | flatten op tree | # Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A recursive type describing trees of operations, and utility methods for it.
"""
from typing import Callable, Iterable, Iterator, NoReturn, Union, TYPE_CHECKING
from typing_extensions import Protocol
from cirq._doc import document
from cirq._import import LazyLoader
from cirq.ops.raw_types import Operation
if TYPE_CHECKING:
import cirq
moment = LazyLoader("moment", globals(), "cirq.circuits.moment")
class OpTree(Protocol):
"""The recursive type consumed by circuit builder methods.
An OpTree is a type protocol, satisfied by anything that can be recursively
flattened into Operations. We also define the Union type OP_TREE which
can be an OpTree or just a single Operation.
For example:
- An Operation is an OP_TREE all by itself.
- A list of operations is an OP_TREE.
- A list of tuples of operations is an OP_TREE.
- A list with a mix of operations and lists of operations is an OP_TREE.
- A generator yielding operations is an OP_TREE.
Note: once mypy supports recursive types this could be defined as an alias:
OP_TREE = Union[Operation, Iterable['OP_TREE']]
See: https://github.com/python/mypy/issues/731
"""
def __iter__(self) -> Iterator[Union[Operation, 'OpTree']]:
pass
OP_TREE = Union[Operation, OpTree]
document(
OP_TREE,
"""An operation or nested collections of operations.
Here are some examples of things that can be given to a method that takes a
`cirq.OP_TREE` argument:
- A single operation (a `cirq.Operation`).
- A list of operations (a `List[cirq.Operation]`).
- A list of lists of operations (a `List[List[cirq.Operation]]`).
- A list mixing operations and generators of operations
(a `List[Union[cirq.Operation, Iterator[cirq.Operation]]]`).
- Generally anything that can be iterated, and its items iterated, and
so forth recursively until a bottom layer of operations is found.
""",
)
def METHOD_NAME(
root: OP_TREE, preserve_moments: bool = False
) -> Iterator[Union[Operation, 'cirq.Moment']]:
"""Performs an in-order iteration of the operations (leaves) in an OP_TREE.
Args:
root: The operation or tree of operations to iterate.
preserve_moments: Whether to yield Moments intact instead of
flattening them
Yields:
Operations from the tree.
Raises:
TypeError: root isn't a valid OP_TREE.
"""
if preserve_moments:
return flatten_to_ops_or_moments(root)
else:
return flatten_to_ops(root)
def flatten_to_ops(root: OP_TREE) -> Iterator[Operation]:
"""Performs an in-order iteration of the operations (leaves) in an OP_TREE.
Args:
root: The operation or tree of operations to iterate.
Yields:
        Operations from the tree.
Raises:
TypeError: root isn't a valid OP_TREE.
"""
if isinstance(root, Operation):
yield root
elif isinstance(root, Iterable) and not isinstance(root, str):
for subtree in root:
yield from flatten_to_ops(subtree)
else:
_bad_op_tree(root)
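# Hedged usage sketch (editor's addition, not part of the cirq source): with
# the public cirq API available, a nested OP_TREE flattens to its leaf
# operations in order.
#
#   import cirq
#   a, b = cirq.LineQubit.range(2)
#   tree = [cirq.X(a), [cirq.X(b), (cirq.measure(a, b),)]]
#   assert len(list(flatten_to_ops(tree))) == 3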
def flatten_to_ops_or_moments(root: OP_TREE) -> Iterator[Union[Operation, 'cirq.Moment']]:
    """Performs an in-order iteration of an OP_TREE, yielding ops and moments.
Args:
root: The operation or tree of operations to iterate.
Yields:
Operations or moments from the tree.
Raises:
TypeError: root isn't a valid OP_TREE.
"""
if isinstance(root, (Operation, moment.Moment)):
yield root
elif isinstance(root, Iterable) and not isinstance(root, str):
for subtree in root:
yield from flatten_to_ops_or_moments(subtree)
else:
_bad_op_tree(root)
def transform_op_tree(
root: OP_TREE,
op_transformation: Callable[[Operation], OP_TREE] = lambda e: e,
iter_transformation: Callable[[Iterable[OP_TREE]], OP_TREE] = lambda e: e,
preserve_moments: bool = False,
) -> OP_TREE:
"""Maps transformation functions onto the nodes of an OP_TREE.
Args:
root: The operation or tree of operations to transform.
op_transformation: How to transform the operations (i.e. leaves).
iter_transformation: How to transform the iterables (i.e. internal
nodes).
preserve_moments: Whether to leave Moments alone. If True, the
transformation functions will not be applied to Moments or the
operations within them.
Returns:
A transformed operation tree.
Raises:
TypeError: root isn't a valid OP_TREE.
"""
if isinstance(root, Operation):
return op_transformation(root)
if preserve_moments and isinstance(root, moment.Moment):
return root
if isinstance(root, Iterable) and not isinstance(root, str):
return iter_transformation(
transform_op_tree(subtree, op_transformation, iter_transformation, preserve_moments)
for subtree in root
)
_bad_op_tree(root)
def freeze_op_tree(root: OP_TREE) -> OP_TREE:
"""Replaces all iterables in the OP_TREE with tuples.
Args:
root: The operation or tree of operations to freeze.
Returns:
An OP_TREE with the same operations and branching structure, but where
all internal nodes are tuples instead of arbitrary iterables.
"""
return transform_op_tree(root, iter_transformation=tuple)
def _bad_op_tree(root: OP_TREE) -> NoReturn:
raise TypeError(f'Not an Operation or Iterable: {type(root)} {root}') |
299,724 | vec3 add x | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: NestedUnion
import flatbuffers
from flatbuffers.compat import import_numpy
from typing import Any
from MyGame.Example.NestedUnion.Test import Test
from typing import Optional
np = import_numpy()
class Vec3(object):
__slots__ = ['_tab']
@classmethod
def GetRootAs(cls, buf, offset: int = 0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = Vec3()
x.Init(buf, n + offset)
return x
@classmethod
def GetRootAsVec3(cls, buf, offset=0):
"""This method is deprecated. Please switch to GetRootAs."""
return cls.GetRootAs(buf, offset)
# Vec3
def Init(self, buf: bytes, pos: int):
self._tab = flatbuffers.table.Table(buf, pos)
# Vec3
def X(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float64Flags, o + self._tab.Pos)
return 0.0
# Vec3
def Y(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float64Flags, o + self._tab.Pos)
return 0.0
# Vec3
def Z(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float64Flags, o + self._tab.Pos)
return 0.0
# Vec3
def Test1(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Float64Flags, o + self._tab.Pos)
return 0.0
# Vec3
def Test2(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 0
# Vec3
def Test3(self) -> Optional[Test]:
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
x = o + self._tab.Pos
obj = Test()
obj.Init(self._tab.Bytes, x)
return obj
return None
def Vec3Start(builder: flatbuffers.Builder):
builder.StartObject(6)
def Start(builder: flatbuffers.Builder):
Vec3Start(builder)
def METHOD_NAME(builder: flatbuffers.Builder, x: float):
builder.PrependFloat64Slot(0, x, 0.0)
def AddX(builder: flatbuffers.Builder, x: float):
METHOD_NAME(builder, x)
def Vec3AddY(builder: flatbuffers.Builder, y: float):
builder.PrependFloat64Slot(1, y, 0.0)
def AddY(builder: flatbuffers.Builder, y: float):
Vec3AddY(builder, y)
def Vec3AddZ(builder: flatbuffers.Builder, z: float):
builder.PrependFloat64Slot(2, z, 0.0)
def AddZ(builder: flatbuffers.Builder, z: float):
Vec3AddZ(builder, z)
def Vec3AddTest1(builder: flatbuffers.Builder, test1: float):
builder.PrependFloat64Slot(3, test1, 0.0)
def AddTest1(builder: flatbuffers.Builder, test1: float):
Vec3AddTest1(builder, test1)
def Vec3AddTest2(builder: flatbuffers.Builder, test2: int):
builder.PrependUint8Slot(4, test2, 0)
def AddTest2(builder: flatbuffers.Builder, test2: int):
Vec3AddTest2(builder, test2)
def Vec3AddTest3(builder: flatbuffers.Builder, test3: Any):
builder.PrependStructSlot(5, flatbuffers.number_types.UOffsetTFlags.py_type(test3), 0)
def AddTest3(builder: flatbuffers.Builder, test3: Any):
Vec3AddTest3(builder, test3)
def Vec3End(builder: flatbuffers.Builder) -> int:
return builder.EndObject()
def End(builder: flatbuffers.Builder) -> int:
return Vec3End(builder)
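# Hedged usage sketch (editor's addition, not part of the generated code):
# assembling a Vec3 table with the builder helpers above; the values are
# made up for illustration.
#
#   builder = flatbuffers.Builder(0)
#   Vec3Start(builder)
#   AddX(builder, 1.0)
#   AddY(builder, 2.0)
#   AddZ(builder, 3.0)
#   vec3 = Vec3End(builder)
#   builder.Finish(vec3)
#   buf = builder.Output()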
import MyGame.Example.NestedUnion.Test
try:
from typing import Optional
except:
pass
class Vec3T(object):
# Vec3T
def __init__(self):
self.x = 0.0 # type: float
self.y = 0.0 # type: float
self.z = 0.0 # type: float
self.test1 = 0.0 # type: float
self.test2 = 0 # type: int
self.test3 = None # type: Optional[MyGame.Example.NestedUnion.Test.TestT]
@classmethod
def InitFromBuf(cls, buf, pos):
vec3 = Vec3()
vec3.Init(buf, pos)
return cls.InitFromObj(vec3)
@classmethod
def InitFromPackedBuf(cls, buf, pos=0):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, pos)
return cls.InitFromBuf(buf, pos+n)
@classmethod
def InitFromObj(cls, vec3):
x = Vec3T()
x._UnPack(vec3)
return x
# Vec3T
def _UnPack(self, vec3):
if vec3 is None:
return
self.x = vec3.X()
self.y = vec3.Y()
self.z = vec3.Z()
self.test1 = vec3.Test1()
self.test2 = vec3.Test2()
if vec3.Test3() is not None:
self.test3 = MyGame.Example.NestedUnion.Test.TestT.InitFromObj(vec3.Test3())
# Vec3T
def Pack(self, builder):
Vec3Start(builder)
METHOD_NAME(builder, self.x)
Vec3AddY(builder, self.y)
Vec3AddZ(builder, self.z)
Vec3AddTest1(builder, self.test1)
Vec3AddTest2(builder, self.test2)
if self.test3 is not None:
test3 = self.test3.Pack(builder)
Vec3AddTest3(builder, test3)
vec3 = Vec3End(builder)
return vec3 |
299,725 | test nr uhf | #!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import unittest
from pyscf import gto
from pyscf import scf
from pyscf.scf import dhf
def setUpModule():
global mol
mol = gto.Mole()
mol.build(
verbose = 0,
atom = [
["O" , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)] ],
basis = 'cc-pvdz',
)
def tearDownModule():
global mol
del mol
class KnownValues(unittest.TestCase):
def test_nr_rhf(self):
rhf = scf.RHF(mol)
rhf.conv_tol = 1e-11
self.assertAlmostEqual(rhf.scf(), -76.026765673119627, 9)
def test_nr_rohf(self):
mol = gto.Mole()
mol.build(
verbose = 0,
atom = [
["O" , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)] ],
basis = 'cc-pvdz',
charge = 1,
spin = 1,
)
mf = scf.rohf.ROHF(mol)
mf.conv_tol = 1e-11
self.assertAlmostEqual(mf.scf(), -75.627354109594179, 9)
def METHOD_NAME(self):
uhf = scf.UHF(mol)
uhf.conv_tol = 1e-11
self.assertAlmostEqual(uhf.scf(), -76.026765673119598, 9)
def test_nr_df_rhf(self):
rhf = scf.density_fit(scf.RHF(mol), 'weigend')
rhf.conv_tol = 1e-11
self.assertAlmostEqual(rhf.scf(), -76.025936299701982, 9)
def test_nr_df_rohf(self):
mol = gto.Mole()
mol.build(
verbose = 0,
atom = [
["O" , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)] ],
basis = 'cc-pvdz',
charge = 1,
spin = 1,
)
mf = scf.density_fit(scf.ROHF(mol), 'weigend')
mf.conv_tol = 1e-11
self.assertAlmostEqual(mf.scf(), -75.626515724371899, 9)
def test_nr_df_uhf(self):
uhf = scf.density_fit(scf.UHF(mol), 'weigend')
uhf.conv_tol = 1e-11
self.assertAlmostEqual(uhf.scf(), -76.025936299702096, 9)
def test_nr_rhf_no_mem(self):
rhf = scf.RHF(mol)
rhf.conv_tol = 1e-11
rhf.max_memory = 0
self.assertAlmostEqual(rhf.scf(), -76.026765673120565, 9)
def test_nr_uhf_no_mem(self):
uhf = scf.UHF(mol)
uhf.conv_tol = 1e-11
uhf.max_memory = 0
self.assertAlmostEqual(uhf.scf(), -76.02676567312075, 9)
def test_nr_rhf_no_direct(self):
rhf = scf.RHF(mol)
rhf.conv_tol = 1e-11
rhf.max_memory = 0
rhf.direct_scf = False
self.assertAlmostEqual(rhf.scf(), -76.02676567311957, 9)
def test_nr_uhf_no_direct(self):
uhf = scf.UHF(mol)
uhf.conv_tol = 1e-11
uhf.max_memory = 0
uhf.direct_scf = False
self.assertAlmostEqual(uhf.scf(), -76.02676567311958, 9)
def test_r_uhf_high_cost(self):
uhf = dhf.UHF(mol)
uhf.conv_tol_grad = 1e-5
self.assertAlmostEqual(uhf.scf(), -76.081567907064198, 6)
def test_r_rhf_high_cost(self):
uhf = dhf.RHF(mol)
uhf.conv_tol_grad = 1e-5
self.assertAlmostEqual(uhf.scf(), -76.081567907064198, 6)
def test_level_shift_uhf(self):
uhf = scf.UHF(mol)
uhf.level_shift = .2
self.assertAlmostEqual(uhf.scf(), -76.026765673118078, 9)
def test_nr_rhf_symm(self):
mol1 = mol.copy()
mol1.symmetry = 1
mol1.build()
rhf = scf.hf.RHF(mol1)
rhf.conv_tol = 1e-11
self.assertAlmostEqual(rhf.scf(), -76.026765673119655, 9)
def test_nr_rohf_symm(self):
mol = gto.Mole()
mol.build(
verbose = 0,
atom = [
["O" , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)] ],
basis = 'cc-pvdz',
charge = 1,
spin = 1,
symmetry = True,
)
mf = scf.hf_symm.ROHF(mol)
mf.conv_tol = 1e-11
self.assertAlmostEqual(mf.scf(), -75.627354109593952, 9)
def test_nr_uhf_symm(self):
mol1 = mol.copy()
mol1.symmetry = 1
mol1.build()
uhf = scf.uhf_symm.UHF(mol1)
uhf.conv_tol = 1e-11
self.assertAlmostEqual(uhf.scf(), -76.026765673119584, 9)
if __name__ == "__main__":
print("Full Tests for H2O vdz")
unittest.main()
|
299,726 | dict row | """
psycopg row factories
"""
# Copyright (C) 2021 The Psycopg Team
import functools
from typing import Any, Callable, Dict, List, Optional, NamedTuple, NoReturn
from typing import TYPE_CHECKING, Sequence, Tuple, Type, TypeVar
from collections import namedtuple
from typing_extensions import TypeAlias
from . import pq
from . import errors as e
from ._compat import Protocol
from ._encodings import _as_python_identifier
if TYPE_CHECKING:
from .cursor import BaseCursor, Cursor
from .cursor_async import AsyncCursor
from psycopg.pq.abc import PGresult
COMMAND_OK = pq.ExecStatus.COMMAND_OK
TUPLES_OK = pq.ExecStatus.TUPLES_OK
SINGLE_TUPLE = pq.ExecStatus.SINGLE_TUPLE
T = TypeVar("T", covariant=True)
# Row factories
Row = TypeVar("Row", covariant=True)
class RowMaker(Protocol[Row]):
"""
    Callable protocol taking a sequence of values and returning an object.
    The sequence of values is what is returned from a database query, already
adapted to the right Python types. The return value is the object that your
program would like to receive: by default (`tuple_row()`) it is a simple
tuple, but it may be any type of object.
Typically, `!RowMaker` functions are returned by `RowFactory`.
"""
def __call__(self, __values: Sequence[Any]) -> Row:
...
class RowFactory(Protocol[Row]):
"""
Callable protocol taking a `~psycopg.Cursor` and returning a `RowMaker`.
A `!RowFactory` is typically called when a `!Cursor` receives a result.
This way it can inspect the cursor state (for instance the
`~psycopg.Cursor.description` attribute) and help a `!RowMaker` to create
a complete object.
For instance the `dict_row()` `!RowFactory` uses the names of the column to
define the dictionary key and returns a `!RowMaker` function which would
use the values to create a dictionary for each record.
"""
def __call__(self, __cursor: "Cursor[Any]") -> RowMaker[Row]:
...
class AsyncRowFactory(Protocol[Row]):
"""
Like `RowFactory`, taking an async cursor as argument.
"""
def __call__(self, __cursor: "AsyncCursor[Any]") -> RowMaker[Row]:
...
class BaseRowFactory(Protocol[Row]):
"""
Like `RowFactory`, taking either type of cursor as argument.
"""
def __call__(self, __cursor: "BaseCursor[Any, Any]") -> RowMaker[Row]:
...
TupleRow: TypeAlias = Tuple[Any, ...]
"""
An alias for the type returned by `tuple_row()` (i.e. a tuple of any content).
"""
DictRow: TypeAlias = Dict[str, Any]
"""
An alias for the type returned by `dict_row()`
A `!DictRow` is a dictionary with keys as string and any value returned by the
database.
"""
def tuple_row(cursor: "BaseCursor[Any, Any]") -> "RowMaker[TupleRow]":
r"""Row factory to represent rows as simple tuples.
This is the default factory, used when `~psycopg.Connection.connect()` or
`~psycopg.Connection.cursor()` are called without a `!row_factory`
parameter.
"""
# Implementation detail: make sure this is the tuple type itself, not an
# equivalent function, because the C code fast-paths on it.
return tuple
def METHOD_NAME(cursor: "BaseCursor[Any, Any]") -> "RowMaker[DictRow]":
"""Row factory to represent rows as dictionaries.
The dictionary keys are taken from the column names of the returned columns.
"""
names = _get_names(cursor)
if names is None:
return no_result
def dict_row_(values: Sequence[Any]) -> Dict[str, Any]:
return dict(zip(names, values))
return dict_row_
def namedtuple_row(
cursor: "BaseCursor[Any, Any]",
) -> "RowMaker[NamedTuple]":
"""Row factory to represent rows as `~collections.namedtuple`.
The field names are taken from the column names of the returned columns,
with some mangling to deal with invalid names.
"""
res = cursor.pgresult
if not res:
return no_result
nfields = _get_nfields(res)
if nfields is None:
return no_result
nt = _make_nt(cursor._encoding, *(res.fname(i) for i in range(nfields)))
return nt._make
@functools.lru_cache(512)
def _make_nt(enc: str, *names: bytes) -> Type[NamedTuple]:
snames = tuple(_as_python_identifier(n.decode(enc)) for n in names)
return namedtuple("Row", snames) # type: ignore[return-value]
def class_row(cls: Type[T]) -> BaseRowFactory[T]:
r"""Generate a row factory to represent rows as instances of the class `!cls`.
The class must support every output column name as a keyword parameter.
:param cls: The class to return for each row. It must support the fields
returned by the query as keyword arguments.
:rtype: `!Callable[[Cursor],` `RowMaker`\[~T]]
"""
def class_row_(cursor: "BaseCursor[Any, Any]") -> "RowMaker[T]":
names = _get_names(cursor)
if names is None:
return no_result
def class_row__(values: Sequence[Any]) -> T:
return cls(**dict(zip(names, values)))
return class_row__
return class_row_
def args_row(func: Callable[..., T]) -> BaseRowFactory[T]:
"""Generate a row factory calling `!func` with positional parameters for every row.
:param func: The function to call for each row. It must support the fields
returned by the query as positional arguments.
"""
def args_row_(cur: "BaseCursor[Any, T]") -> "RowMaker[T]":
def args_row__(values: Sequence[Any]) -> T:
return func(*values)
return args_row__
return args_row_
def kwargs_row(func: Callable[..., T]) -> BaseRowFactory[T]:
"""Generate a row factory calling `!func` with keyword parameters for every row.
:param func: The function to call for each row. It must support the fields
returned by the query as keyword arguments.
"""
def kwargs_row_(cursor: "BaseCursor[Any, T]") -> "RowMaker[T]":
names = _get_names(cursor)
if names is None:
return no_result
def kwargs_row__(values: Sequence[Any]) -> T:
return func(**dict(zip(names, values)))
return kwargs_row__
return kwargs_row_
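# Hedged usage sketch (editor's addition, not part of the psycopg source):
# a row factory is passed to a connection or cursor via ``row_factory``; the
# connection string and table are placeholders.
#
#   import psycopg
#   from dataclasses import dataclass
#
#   @dataclass
#   class Person:
#       id: int
#       name: str
#
#   with psycopg.connect("dbname=test") as conn:
#       cur = conn.cursor(row_factory=class_row(Person))
#       rec = cur.execute("SELECT id, name FROM person LIMIT 1").fetchone()
#       # rec is a Person instance, or None if the table is empty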
def no_result(values: Sequence[Any]) -> NoReturn:
    """A `RowMaker` that always fails.
It can be used as return value for a `RowFactory` called with no result.
Note that the `!RowFactory` *will* be called with no result, but the
resulting `!RowMaker` never should.
"""
raise e.InterfaceError("the cursor doesn't have a result")
def _get_names(cursor: "BaseCursor[Any, Any]") -> Optional[List[str]]:
res = cursor.pgresult
if not res:
return None
nfields = _get_nfields(res)
if nfields is None:
return None
enc = cursor._encoding
return [
res.fname(i).decode(enc) for i in range(nfields) # type: ignore[union-attr]
]
def _get_nfields(res: "PGresult") -> Optional[int]:
"""
    Return the number of columns in a result if it returns tuples, else None.
Take into account the special case of results with zero columns.
"""
nfields = res.nfields
if (
res.status == TUPLES_OK
or res.status == SINGLE_TUPLE
# "describe" in named cursors
or (res.status == COMMAND_OK and nfields)
):
return nfields
else:
return None |
299,727 | pull | import abc
import json
import urllib.parse
import uuid
import hashlib
import datetime
import logging
import asab.web.rest.json
import http.client
import typing
#
L = logging.getLogger(__name__)
#
class UpsertorABC(abc.ABC):
def __init__(self, storage, collection, obj_id, version=None):
self.Storage = storage
self.Collection = collection
self.ObjId = obj_id
self.Version = version
now = datetime.datetime.now(datetime.timezone.utc)
self.ModSet = {
'_m': now, # Set the modification datetime
}
if version == 0:
self.ModSet['_c'] = now # Set the creation datetime
self.ModUnset = {}
self.ModInc = {
'_v': 1, # Increment '_v' at every change
}
self.ModPush = {}
self.ModPull = {}
self.WebhookResponseData = {}
def get_id_name(self):
return "_id"
@classmethod
def generate_id(cls) -> bytes:
"""
        Generate a unique ID from the SHA-256 hash of a random UUID.
        Returns:
            The generated ID as bytes.
"""
m = hashlib.sha256()
m.update(uuid.uuid4().bytes)
return m.digest()
def set(self, objField, value, encrypt=False, encrypt_iv=None):
"""
Add key and value to the upsertor.
Args:
objField: Key of the object.
value: Value of the object.
encrypt: Allow encryption.
encrypt_iv: Custom initialization vector.
"""
if encrypt:
value = self.Storage.aes_encrypt(value, iv=encrypt_iv)
self.ModSet[objField] = value
def unset(self, obj_field):
'''
Scalar unset
'''
self.ModUnset[obj_field] = ""
def increment(self, field_name, amount=1):
'''
Scalar increment
'''
self.ModInc[field_name] = amount
def decrement(self, field_name, amount=1):
'''
Scalar decrement
'''
return self.increment(field_name, -amount)
def push(self, field_name, value):
'''
Push an item into a list
'''
if self.ModPush.get(field_name) is None:
self.ModPush[field_name] = []
self.ModPush[field_name].append(value)
def METHOD_NAME(self, field_name, value):
'''
Pull an item from a list
'''
if self.ModPull.get(field_name) is None:
self.ModPull[field_name] = []
self.ModPull[field_name].append(value)
@abc.abstractmethod
async def execute(self, custom_data: typing.Optional[dict] = None, event_type: typing.Optional[str] = None):
"""
Commit upsertor data to the storage. Afterwards, send a webhook request with upsertion details.
Args:
custom_data: Custom execution data. Included in webhook payload.
event_type: Event type included in webhook payload.
Raises:
DuplicateError: Raised if there is a colliding object already stored in a storage.
"""
pass
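    # Hedged usage sketch (editor's addition): how a concrete upsertor obtained
    # from an ASAB storage service is typically driven; the collection and field
    # names below are placeholders.
    #
    #   u = storage_service.upsertor("users", obj_id="u1", version=0)
    #   u.set("name", "Alice")
    #   u.increment("login_count")
    #   u.push("tags", "beta")
    #   object_id = await u.execute()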
async def webhook(self, data: dict):
        """
        Send the upsert data to every configured webhook URI as a JSON PUT request.
        The response of each call is stored in `self.WebhookResponseData`, keyed by URI.
        Args:
            data: Payload to be serialized to JSON and sent to the webhooks.
        """
assert self.Storage.WebhookURIs is not None
json_dump = asab.web.rest.json.JSONDumper(pretty=False)(data)
for uri in self.Storage.WebhookURIs:
self.WebhookResponseData[uri] = await self.Storage.ProactorService.execute(
self._webhook, json_dump, uri, self.Storage.WebhookAuth)
def _webhook(self, data, uri, auth=None):
u = urllib.parse.urlparse(uri)
if u.scheme == "https":
conn = http.client.HTTPSConnection(u.netloc)
else:
conn = http.client.HTTPConnection(u.netloc)
headers = {"Content-Type": "application/json"}
if auth is not None:
headers["Authorization"] = auth
try:
conn.request("PUT", uri, data, headers)
response = conn.getresponse()
if response.status // 100 != 2:
text = response.read()
L.error(
"Webhook endpoint responded with {}: {}".format(response.status, text),
struct_data={"uri": uri})
return
            # return the parsed response so the caller can store it per webhook URI
            return json.load(response)
except ConnectionRefusedError:
L.error("Webhook call failed: Connection refused.", struct_data={"uri": uri})
return
except json.decoder.JSONDecodeError as e:
L.error("Failed to decode JSON response from webhook: {}".format(str(e)), struct_data={"uri": uri})
except Exception as e:
L.error("Webhook call failed with {}: {}".format(type(e).__name__, str(e)), struct_data={"uri": uri})
finally:
conn.close() |
299,728 | cu init raising test | import multiprocessing as mp
import os
from numba import cuda
from numba.cuda.cudadrv.driver import CudaAPIError, driver
from numba.cuda.cudadrv.error import CudaSupportError
from numba.cuda.testing import skip_on_cudasim, unittest, CUDATestCase
# A mock of cuInit that always raises a CudaAPIError
def cuInit_raising(arg):
raise CudaAPIError(999, 'CUDA_ERROR_UNKNOWN')
# Test code to run in a child that patches driver.cuInit to a variant that
# always raises. We can't use mock.patch.object here because driver.cuInit is
# not assigned until we attempt to initialize - mock.patch.object cannot locate
# the non-existent original method, and so fails. Instead we patch
# driver.cuInit with our raising version prior to any attempt to initialize.
def METHOD_NAME(result_queue):
driver.cuInit = cuInit_raising
success = False
msg = None
try:
# A CUDA operation that forces initialization of the device
cuda.device_array(1)
except CudaSupportError as e:
success = True
msg = e.msg
result_queue.put((success, msg))
# Similar to cuInit_raising_test above, but for testing that the string
# returned by cuda_error() is as expected.
def initialization_error_test(result_queue):
driver.cuInit = cuInit_raising
success = False
msg = None
try:
# A CUDA operation that forces initialization of the device
cuda.device_array(1)
except CudaSupportError:
success = True
msg = cuda.cuda_error()
result_queue.put((success, msg))
# For testing the path where Driver.__init__() catches a CudaSupportError
def cuda_disabled_test(result_queue):
success = False
msg = None
try:
# A CUDA operation that forces initialization of the device
cuda.device_array(1)
except CudaSupportError as e:
success = True
msg = e.msg
result_queue.put((success, msg))
# Similar to cuda_disabled_test, but checks cuda.cuda_error() instead of the
# exception raised on initialization
def cuda_disabled_error_test(result_queue):
success = False
msg = None
try:
# A CUDA operation that forces initialization of the device
cuda.device_array(1)
except CudaSupportError:
success = True
msg = cuda.cuda_error()
result_queue.put((success, msg))
@skip_on_cudasim('CUDA Simulator does not initialize driver')
class TestInit(CUDATestCase):
def _test_init_failure(self, target, expected):
# Run the initialization failure test in a separate subprocess
ctx = mp.get_context('spawn')
result_queue = ctx.Queue()
proc = ctx.Process(target=target, args=(result_queue,))
proc.start()
proc.join(30) # should complete within 30s
success, msg = result_queue.get()
# Ensure the child process raised an exception during initialization
# before checking the message
if not success:
self.fail('CudaSupportError not raised')
self.assertIn(expected, msg)
def test_init_failure_raising(self):
expected = 'Error at driver init: CUDA_ERROR_UNKNOWN (999)'
self._test_init_failure(METHOD_NAME, expected)
def test_init_failure_error(self):
expected = 'CUDA_ERROR_UNKNOWN (999)'
self._test_init_failure(initialization_error_test, expected)
def _test_cuda_disabled(self, target):
# Uses _test_init_failure to launch the test in a separate subprocess
# with CUDA disabled.
cuda_disabled = os.environ.get('NUMBA_DISABLE_CUDA')
os.environ['NUMBA_DISABLE_CUDA'] = "1"
try:
expected = 'CUDA is disabled due to setting NUMBA_DISABLE_CUDA=1'
self._test_init_failure(cuda_disabled_test, expected)
finally:
if cuda_disabled is not None:
os.environ['NUMBA_DISABLE_CUDA'] = cuda_disabled
else:
os.environ.pop('NUMBA_DISABLE_CUDA')
def test_cuda_disabled_raising(self):
self._test_cuda_disabled(cuda_disabled_test)
def test_cuda_disabled_error(self):
self._test_cuda_disabled(cuda_disabled_error_test)
def test_init_success(self):
# Here we assume that initialization is successful (because many bad
# things will happen with the test suite if it is not) and check that
# there is no error recorded.
self.assertIsNone(cuda.cuda_error())
if __name__ == '__main__':
unittest.main() |
299,729 | get test 5 | from typing import List, Optional
from uuid import UUID, uuid4
import databases
import pydantic
import pytest
import sqlalchemy
from asgi_lifespan import LifespanManager
from fastapi import FastAPI
from httpx import AsyncClient
import ormar
from tests.settings import DATABASE_URL
app = FastAPI()
database = databases.Database(DATABASE_URL, force_rollback=True)
metadata = sqlalchemy.MetaData()
app.state.database = database
@app.on_event("startup")
async def startup() -> None:
database_ = app.state.database
if not database_.is_connected:
await database_.connect()
@app.on_event("shutdown")
async def shutdown() -> None:
database_ = app.state.database
if database_.is_connected:
await database_.disconnect()
@pytest.fixture(autouse=True, scope="module")
def create_test_database():
engine = sqlalchemy.create_engine(DATABASE_URL)
metadata.create_all(engine)
yield
metadata.drop_all(engine)
class BaseMeta(ormar.ModelMeta):
database = database
metadata = metadata
class OtherThing(ormar.Model):
class Meta(BaseMeta):
tablename = "other_things"
id: UUID = ormar.UUID(primary_key=True, default=uuid4)
name: str = ormar.Text(default="")
ot_contents: str = ormar.Text(default="")
class Thing(ormar.Model):
class Meta(BaseMeta):
tablename = "things"
id: UUID = ormar.UUID(primary_key=True, default=uuid4)
name: str = ormar.Text(default="")
js: pydantic.Json = ormar.JSON(nullable=True)
other_thing: Optional[OtherThing] = ormar.ForeignKey(OtherThing, nullable=True)
@app.post("/test/1")
async def post_test_1():
# don't split initialization and attribute assignment
ot = await OtherThing(ot_contents="otc").save()
await Thing(other_thing=ot, name="t1").save()
await Thing(other_thing=ot, name="t2").save()
await Thing(other_thing=ot, name="t3").save()
# if you do not care about returned object you can even go with bulk_create
# all of them are created in one transaction
# things = [Thing(other_thing=ot, name='t1'),
# Thing(other_thing=ot, name="t2"),
# Thing(other_thing=ot, name="t3")]
# await Thing.objects.bulk_create(things)
@app.get("/test/2", response_model=List[Thing])
async def get_test_2():
# if you only query for one use get or first
ot = await OtherThing.objects.get()
ts = await ot.things.all()
# specifically null out the relation on things before return
for t in ts:
t.remove(ot, name="other_thing")
return ts
@app.get("/test/3", response_model=List[Thing])
async def get_test_3():
ot = await OtherThing.objects.select_related("things").get()
# exclude unwanted field while ot is still in scope
# in order not to pass it to fastapi
return [t.dict(exclude={"other_thing"}) for t in ot.things]
@app.get("/test/4", response_model=List[Thing], response_model_exclude={"other_thing"})
async def get_test_4():
ot = await OtherThing.objects.get()
# query from the active side
return await Thing.objects.all(other_thing=ot)
@app.get("/get_ot/", response_model=OtherThing)
async def get_ot():
return await OtherThing.objects.get()
# more real life (usually) is not getting some random OT and getting its Things
# but query for a specific one by some kind of id
@app.get(
"/test/5/{thing_id}",
response_model=List[Thing],
response_model_exclude={"other_thing"},
)
async def METHOD_NAME(thing_id: UUID):
return await Thing.objects.all(other_thing__id=thing_id)
@app.get(
"/test/error", response_model=List[Thing], response_model_exclude={"other_thing"}
)
async def get_weakref():
ots = await OtherThing.objects.all()
ot = ots[0]
ts = await ot.things.all()
return ts
@pytest.mark.asyncio
async def test_endpoints():
client = AsyncClient(app=app, base_url="http://testserver")
async with client, LifespanManager(app):
resp = await client.post("/test/1")
assert resp.status_code == 200
resp2 = await client.get("/test/2")
assert resp2.status_code == 200
assert len(resp2.json()) == 3
resp3 = await client.get("/test/3")
assert resp3.status_code == 200
assert len(resp3.json()) == 3
resp4 = await client.get("/test/4")
assert resp4.status_code == 200
assert len(resp4.json()) == 3
ot = OtherThing(**(await client.get("/get_ot/")).json())
resp5 = await client.get(f"/test/5/{ot.id}")
assert resp5.status_code == 200
assert len(resp5.json()) == 3
resp6 = await client.get("/test/error")
assert resp6.status_code == 200
assert len(resp6.json()) == 3 |
299,730 | test dynamic | import lief
from utils import get_sample
TARGET = lief.parse(get_sample('ELF/ELF32_x86_binary_all.bin'))
def test_header():
assert TARGET.interpreter == "/lib/ld-linux.so.2"
assert TARGET.entrypoint == 0x774
def test_sections():
assert len(TARGET.sections) == 32
assert TARGET.has_section(".tdata")
text_section = TARGET.get_section(".text")
assert text_section.type == lief.ELF.SECTION_TYPES.PROGBITS
assert text_section.offset == 0x6D0
assert text_section.virtual_address == 0x6D0
assert text_section.size == 0x271
assert text_section.alignment == 16
assert lief.ELF.SECTION_FLAGS.ALLOC in text_section
assert lief.ELF.SECTION_FLAGS.EXECINSTR in text_section
def test_segments():
segments = TARGET.segments
assert len(segments) == 10
LOAD_0 = segments[2]
LOAD_1 = segments[3]
assert LOAD_0.type == lief.ELF.SEGMENT_TYPES.LOAD
assert LOAD_0.file_offset == 0
assert LOAD_0.virtual_address == 0
assert LOAD_0.physical_size == 0x00b34
assert LOAD_0.virtual_size == 0x00b34
assert int(LOAD_0.flags) == lief.ELF.SEGMENT_FLAGS.R | lief.ELF.SEGMENT_FLAGS.X
assert LOAD_1.type == lief.ELF.SEGMENT_TYPES.LOAD
assert LOAD_1.file_offset == 0x000ed8
assert LOAD_1.virtual_address == 0x00001ed8
assert LOAD_1.physical_address == 0x00001ed8
assert LOAD_1.physical_size == 0x00148
assert LOAD_1.virtual_size == 0x0014c
assert int(LOAD_1.flags) == lief.ELF.SEGMENT_FLAGS.R | lief.ELF.SEGMENT_FLAGS.W
def METHOD_NAME():
entries = TARGET.dynamic_entries
assert len(entries) == 28
assert entries[0].name == "libc.so.6"
assert entries[3].array == [2208, 1782]
assert TARGET[lief.ELF.DYNAMIC_TAGS.FLAGS_1].value == 0x8000000
def test_relocations():
dynamic_relocations = TARGET.dynamic_relocations
pltgot_relocations = TARGET.pltgot_relocations
assert len(dynamic_relocations) == 10
assert len(pltgot_relocations) == 3
assert dynamic_relocations[0].address == 0x00001edc
assert dynamic_relocations[8].symbol.name == "__gmon_start__"
assert dynamic_relocations[9].address == 0x00001ffc
assert pltgot_relocations[1].address == 0x00002010
assert pltgot_relocations[1].symbol.name == "puts"
assert pltgot_relocations[1].info == 4
def test_symbols():
dynamic_symbols = TARGET.dynamic_symbols
static_symbols = TARGET.static_symbols
assert len(dynamic_symbols) == 27
assert len(static_symbols) == 78
first = TARGET.get_dynamic_symbol("first")
assert first.value == 0x000008a9
assert first.symbol_version.value == 0x8002
assert first.symbol_version.symbol_version_auxiliary.name == "LIBSIMPLE_1.0"
dtor = TARGET.get_static_symbol("__cxa_finalize@@GLIBC_2.1.3")
assert dtor.value == 00000000
symbol_version_definition = TARGET.symbols_version_definition
symbols_version_requirement = TARGET.symbols_version_requirement
symbols_version = TARGET.symbols_version
assert len(symbol_version_definition) == 2
assert len(symbols_version_requirement) == 1
assert len(symbols_version) == 27
assert symbol_version_definition[0].hash == 0x63ca0e
assert symbol_version_definition[0].version == 1
assert symbol_version_definition[0].flags == 1
assert symbol_version_definition[0].auxiliary_symbols[0].name == "all-32.bin"
assert symbol_version_definition[1].auxiliary_symbols[0].name == "LIBSIMPLE_1.0"
assert symbols_version_requirement[0].name == "libc.so.6"
assert symbols_version_requirement[0].version == 1
assert symbols_version[0].value == 0
def test_notes():
notes = TARGET.notes
assert len(notes) == 2
assert notes[0].details.abi == lief.ELF.NOTE_ABIS.LINUX
assert notes[0].description == [0, 0, 0, 0, 3, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0]
assert notes[0].name == "GNU"
assert notes[0].type == lief.ELF.NOTE_TYPES.ABI_TAG
assert notes[0].details.version == [3, 2, 0]
def test_symbols_sections():
"""
Related to this issue: https://github.com/lief-project/LIEF/issues/841
"""
elf = lief.parse(get_sample('ELF/ELF64_x86-64_binary_all.bin'))
main = elf.get_static_symbol("main")
assert main.section is not None
assert main.section.name == ".text"
assert elf.get_static_symbol("__gmon_start__").section is None
assert elf.get_static_symbol("_fini").section.name == ".fini" |
299,731 | summary | # Copyright (C) 2023 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
from __future__ import annotations
from copy import deepcopy
from enum import Enum
from typing import Any, Sequence
from django.core.exceptions import ValidationError
from django.db import models
from django.forms.models import model_to_dict
from cvat.apps.engine.models import Job, ShapeType, Task
class AnnotationConflictType(str, Enum):
MISSING_ANNOTATION = "missing_annotation"
EXTRA_ANNOTATION = "extra_annotation"
MISMATCHING_LABEL = "mismatching_label"
LOW_OVERLAP = "low_overlap"
MISMATCHING_DIRECTION = "mismatching_direction"
MISMATCHING_ATTRIBUTES = "mismatching_attributes"
MISMATCHING_GROUPS = "mismatching_groups"
COVERED_ANNOTATION = "covered_annotation"
def __str__(self) -> str:
return self.value
@classmethod
def choices(cls):
return tuple((x.value, x.name) for x in cls)
class AnnotationConflictSeverity(str, Enum):
WARNING = "warning"
ERROR = "error"
def __str__(self) -> str:
return self.value
@classmethod
def choices(cls):
return tuple((x.value, x.name) for x in cls)
class MismatchingAnnotationKind(str, Enum):
ATTRIBUTE = "attribute"
LABEL = "label"
def __str__(self) -> str:
return self.value
@classmethod
def choices(cls):
return tuple((x.value, x.name) for x in cls)
class QualityReportTarget(str, Enum):
JOB = "job"
TASK = "task"
def __str__(self) -> str:
return self.value
@classmethod
def choices(cls):
return tuple((x.value, x.name) for x in cls)
class QualityReport(models.Model):
job = models.ForeignKey(
Job, on_delete=models.CASCADE, related_name="quality_reports", null=True, blank=True
)
task = models.ForeignKey(
Task, on_delete=models.CASCADE, related_name="quality_reports", null=True, blank=True
)
parent = models.ForeignKey(
"self", on_delete=models.CASCADE, related_name="children", null=True, blank=True
)
children: Sequence[QualityReport]
created_date = models.DateTimeField(auto_now_add=True)
target_last_updated = models.DateTimeField()
gt_last_updated = models.DateTimeField()
data = models.JSONField()
conflicts: Sequence[AnnotationConflict]
@property
def target(self) -> QualityReportTarget:
if self.job:
return QualityReportTarget.JOB
elif self.task:
return QualityReportTarget.TASK
else:
assert False
def _parse_report(self):
from cvat.apps.quality_control.quality_reports import ComparisonReport
return ComparisonReport.from_json(self.data)
@property
def METHOD_NAME(self):
report = self._parse_report()
return report.comparison_summary
def get_task(self) -> Task:
if self.task is not None:
return self.task
else:
return self.job.segment.task
def get_json_report(self) -> str:
return self.data
def clean(self):
if not (self.job is not None) ^ (self.task is not None):
raise ValidationError("One of the 'job' and 'task' fields must be set")
@property
def organization_id(self):
if task := self.get_task():
return getattr(task.organization, "id", None)
return None
class AnnotationConflict(models.Model):
report = models.ForeignKey(QualityReport, on_delete=models.CASCADE, related_name="conflicts")
frame = models.PositiveIntegerField()
type = models.CharField(max_length=32, choices=AnnotationConflictType.choices())
severity = models.CharField(max_length=32, choices=AnnotationConflictSeverity.choices())
annotation_ids: Sequence[AnnotationId]
@property
def organization_id(self):
return self.report.organization_id
class AnnotationType(str, Enum):
TAG = "tag"
SHAPE = "shape"
TRACK = "track"
def __str__(self) -> str:
return self.value
@classmethod
def choices(cls):
return tuple((x.value, x.name) for x in cls)
class AnnotationId(models.Model):
conflict = models.ForeignKey(
AnnotationConflict, on_delete=models.CASCADE, related_name="annotation_ids"
)
obj_id = models.PositiveIntegerField()
job_id = models.PositiveIntegerField()
type = models.CharField(max_length=32, choices=AnnotationType.choices())
shape_type = models.CharField(
max_length=32, choices=ShapeType.choices(), null=True, default=None
)
def clean(self) -> None:
if self.type in [AnnotationType.SHAPE, AnnotationType.TRACK]:
if not self.shape_type:
raise ValidationError("Annotation kind must be specified")
elif self.type == AnnotationType.TAG:
if self.shape_type:
raise ValidationError("Annotation kind must be empty")
else:
raise ValidationError(f"Unexpected type value '{self.type}'")
class QualitySettings(models.Model):
task = models.OneToOneField(Task, on_delete=models.CASCADE, related_name="quality_settings")
iou_threshold = models.FloatField()
oks_sigma = models.FloatField()
line_thickness = models.FloatField()
low_overlap_threshold = models.FloatField()
compare_line_orientation = models.BooleanField()
line_orientation_threshold = models.FloatField()
compare_groups = models.BooleanField()
group_match_threshold = models.FloatField()
check_covered_annotations = models.BooleanField()
object_visibility_threshold = models.FloatField()
panoptic_comparison = models.BooleanField()
compare_attributes = models.BooleanField()
def __init__(self, *args: Any, **kwargs: Any) -> None:
defaults = deepcopy(self.get_defaults())
for field in self._meta.fields:
if field.name in defaults:
field.default = defaults[field.name]
super().__init__(*args, **kwargs)
@classmethod
def get_defaults(cls) -> dict:
import cvat.apps.quality_control.quality_reports as qc
default_settings = qc.DatasetComparator.DEFAULT_SETTINGS.to_dict()
existing_fields = {f.name for f in cls._meta.fields}
return {k: v for k, v in default_settings.items() if k in existing_fields}
def to_dict(self):
return model_to_dict(self)
@property
def organization_id(self):
return getattr(self.task.organization, "id", None) |
299,732 | state dict | import warnings
import torch
import torch.distributed.algorithms.model_averaging.averagers as averagers
class PostLocalSGDOptimizer(torch.optim.Optimizer):
r"""
    Wraps an arbitrary :class:`torch.optim.Optimizer` and runs `post-local SGD <https://arxiv.org/abs/1808.07217>`_.
    This optimizer runs the local optimizer at every step.
    After the warm-up stage, it averages parameters periodically after the local optimizer is applied.
Args:
optim: The local optimizer.
averager: A model averager instance to run post-localSGD algorithm.
Example::
>>> # xdoctest: +SKIP("undefined variables")
>>> import torch
>>> import torch.distributed as dist
>>> import torch.distributed.algorithms.model_averaging.averagers as averagers
>>> import torch.nn as nn
>>> from torch.distributed.optim import PostLocalSGDOptimizer
>>> from torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook import (
>>> PostLocalSGDState,
>>> post_localSGD_hook,
>>> )
>>>
>>> model = nn.parallel.DistributedDataParallel(
>>> module, device_ids=[rank], output_device=rank
>>> )
>>>
>>> # Register a post-localSGD communication hook.
>>> state = PostLocalSGDState(process_group=None, subgroup=None, start_localSGD_iter=100)
>>> model.register_comm_hook(state, post_localSGD_hook)
>>>
>>> # Create a post-localSGD optimizer that wraps a local optimizer.
>>> # Note that ``warmup_steps`` used in ``PostLocalSGDOptimizer`` must be the same as
>>> # ``start_localSGD_iter`` used in ``PostLocalSGDState``.
>>> local_optim = torch.optim.SGD(params=model.parameters(), lr=0.01)
>>> opt = PostLocalSGDOptimizer(
>>> optim=local_optim,
>>> averager=averagers.PeriodicModelAverager(period=4, warmup_steps=100)
>>> )
>>>
>>> # In the first 100 steps, DDP runs global gradient averaging at every step.
>>> # After 100 steps, DDP runs gradient averaging within each subgroup (intra-node by default),
>>> # and post-localSGD optimizer runs global model averaging every 4 steps after applying the local optimizer.
>>> for step in range(0, 200):
>>> opt.zero_grad()
>>> loss = loss_fn(output, labels)
>>> loss.backward()
>>> opt.step()
"""
def __init__(self, optim: torch.optim.Optimizer, averager: averagers.ModelAverager):
self.optim = optim
self.param_groups = self.optim.param_groups
self.averager = averager
@property
def state(self):
return self.optim.state
def __repr__(self):
return self.optim.__repr__()
def METHOD_NAME(self):
r"""
This is the same as :class:`torch.optim.Optimizer` :meth:`state_dict`,
but adds an extra entry to record model averager's step to the checkpoint
to ensure reload does not cause unnecessary warm up again.
"""
optim_state_dict = self.optim.METHOD_NAME()
optim_state_dict["step"] = self.averager.step
return optim_state_dict
def load_state_dict(self, METHOD_NAME):
r"""
This is the same as :class:`torch.optim.Optimizer` :meth:`load_state_dict`,
but also restores model averager's step value to the one
saved in the provided ``state_dict``.
If there is no ``"step"`` entry in ``state_dict``,
it will raise a warning and initialize the model averager's step to 0.
"""
self.optim.load_state_dict(METHOD_NAME)
if "step" in METHOD_NAME:
self.averager.step = METHOD_NAME["step"]
else:
warnings.warn(
"Loaded state dict does not contain a step counter for an averager. "
"Setting step counter to 0."
)
self.averager.step = 0
def step(self):
r"""
Performs a single optimization step (parameter update).
"""
self.optim.step()
self.averager.average_parameters(params=self.param_groups)
def zero_grad(self, set_to_none: bool = True): # type: ignore[override]
self.optim.zero_grad(set_to_none=set_to_none)
def add_param_group(self, param_group):
self.optim.add_param_group(param_group) |
299,733 | init | # Janssen Project software is available under the Apache 2.0 License (2004). See http://www.apache.org/licenses/ for full text.
# Copyright (c) 2020, Janssen Project
#
# Author: Madhumita Subramaniam
#
from io.jans.service.cdi.util import CdiUtil
from io.jans.as.server.security import Identity
from io.jans.model.custom.script.type.auth import PersonAuthenticationType
from io.jans.as.server.service import AuthenticationService, UserService
from io.jans.util import StringHelper
from io.jans.as.server.util import ServerUtil
from io.jans.as.common.model.common import User
from io.jans.orm import PersistenceEntryManager
from io.jans.as.persistence.model.configuration import GluuConfiguration
from java.math import BigInteger
from java.security import SecureRandom
import java
import sys
import json
from java.util import Collections, HashMap, HashSet, ArrayList, Arrays, Date
from com.google.api.client.googleapis.auth.oauth2 import GoogleIdToken
from com.google.api.client.googleapis.auth.oauth2.GoogleIdToken import Payload
from com.google.api.client.googleapis.auth.oauth2 import GoogleIdTokenVerifier
from com.google.api.client.http.javanet import NetHttpTransport;
from com.google.api.client.json.jackson2 import JacksonFactory;
class PersonAuthentication(PersonAuthenticationType):
def __init__(self, currentTimeMillis):
self.currentTimeMillis = currentTimeMillis
def METHOD_NAME(self, customScript, configurationAttributes):
print "Google. Initialization"
google_creds_file = configurationAttributes.get("google_creds_file").getValue2()
# Load credentials from file
f = open(google_creds_file, 'r')
try:
data = json.loads(f.read())
print data
creds = data["web"]
print creds
except:
print "Google. Initialization. Failed to load creds from file:", google_creds_file
print "Exception: ", sys.exc_info()[1]
return False
finally:
f.close()
self.client_id = str(creds["client_id"])
self.project_id = str(creds["project_id"])
self.auth_uri = str(creds["auth_uri"])
self.token_uri = str(creds["token_uri"])
self.auth_provider_x509_cert_url = str(creds["auth_provider_x509_cert_url"])
self.redirect_uris = str(creds["redirect_uris"])
self.javascript_origins = str(creds["javascript_origins"])
print "Google. Initialized successfully"
return True
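    # Hedged illustration (editor's addition): the shape of the credentials
    # file the initialization above expects; every value here is a placeholder.
    #
    #   {
    #     "web": {
    #       "client_id": "<client-id>.apps.googleusercontent.com",
    #       "project_id": "<project-id>",
    #       "auth_uri": "https://accounts.google.com/o/oauth2/auth",
    #       "token_uri": "https://oauth2.googleapis.com/token",
    #       "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
    #       "redirect_uris": ["https://example.org/callback"],
    #       "javascript_origins": ["https://example.org"]
    #     }
    #   }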
def destroy(self, configurationAttributes):
print "Google. Destroy"
print "Google. Destroyed successfully"
return True
def getAuthenticationMethodClaims(self, requestParameters):
return None
def getApiVersion(self):
return 11
def isValidAuthenticationMethod(self, usageType, configurationAttributes):
return True
def getAlternativeAuthenticationMethod(self, usageType, configurationAttributes):
return None
def authenticate(self, configurationAttributes, requestParameters, step):
authenticationService = CdiUtil.bean(AuthenticationService)
if (step == 1):
print "Google. Authenticate for step 1"
identity = CdiUtil.bean(Identity)
googleCred = ServerUtil.getFirstValue(requestParameters, "credential")
if googleCred is not None:
googleIdToken = ServerUtil.getFirstValue(requestParameters, "credential")
google_Id = self.verifyIDToken(googleIdToken)
# if user doesnt exist in persistence, add
foundUser = self.findUserByGoogleId(google_Id)
if foundUser is None:
foundUser = User()
foundUser.setAttribute("jansExtUid", "passport-google:"+google_Id)
foundUser.setAttribute(self.getLocalPrimaryKey(),google_Id)
userService = CdiUtil.bean(UserService)
result = userService.addUser(foundUser, True)
foundUser = self.findUserByGoogleId(google_Id)
logged_in = authenticationService.authenticate(foundUser.getUserId())
return logged_in
else:
credentials = identity.getCredentials()
user_name = credentials.getUsername()
user_password = credentials.getPassword()
logged_in = False
if (StringHelper.isNotEmptyString(user_name) and StringHelper.isNotEmptyString(user_password)):
logged_in = authenticationService.authenticate(user_name, user_password)
return logged_in
else:
print "Google. Authenticate Error"
return False
def verifyIDToken(self, googleIdToken):
verifier = GoogleIdTokenVerifier.Builder(NetHttpTransport(), JacksonFactory()).setAudience(Collections.singletonList(self.client_id)).build()
# the GoogleIdTokenVerifier.verify() method verifies the JWT signature, the aud claim, the iss claim, and the exp claim.
idToken = verifier.verify(googleIdToken)
if idToken is not None:
payload = idToken.getPayload()
userId = payload.getSubject()
print "User ID: %s" % userId
#email = payload.getEmail()
#emailVerified = Boolean.valueOf(payload.getEmailVerified())
#name = str( payload.get("name"))
#pictureUrl = str(payload.get("picture"))
#locale = str( payload.get("locale"))
#familyName = str( payload.get("family_name"))
#givenName = str( payload.get("given_name"))
return userId
else :
print "Invalid ID token."
return None
def findUserByGoogleId(self, googleId):
userService = CdiUtil.bean(UserService)
return userService.getUserByAttribute("jansExtUid", "passport-google:"+googleId)
def getLocalPrimaryKey(self):
entryManager = CdiUtil.bean(PersistenceEntryManager)
config = GluuConfiguration()
config = entryManager.find(config.getClass(), "ou=configuration,o=jans")
#Pick (one) attribute where user id is stored (e.g. uid/mail)
# primaryKey is the primary key on the backend AD / LDAP Server
# localPrimaryKey is the primary key on Gluu. This attr value has been mapped with the primary key attr of the backend AD / LDAP when configuring cache refresh
uid_attr = config.getIdpAuthn().get(0).getConfig().findValue("localPrimaryKey").asText()
        print "Google. getLocalPrimaryKey. uid attribute is '%s'" % uid_attr
return uid_attr
def prepareForStep(self, configurationAttributes, requestParameters, step):
if (step == 1):
print "Google. Prepare for Step 1"
identity = CdiUtil.bean(Identity)
identity.setWorkingParameter("gclient_id",self.client_id)
return True
else:
return False
def getExtraParametersForStep(self, configurationAttributes, step):
return None
def getCountAuthenticationSteps(self, configurationAttributes):
return 1
def getPageForStep(self, configurationAttributes, step):
if(step == 1):
return "/auth/google/login.xhtml"
return ""
def getNextStep(self, configurationAttributes, requestParameters, step):
return -1
def getLogoutExternalUrl(self, configurationAttributes, requestParameters):
print "Get external logout URL call"
return None
def logout(self, configurationAttributes, requestParameters):
return True |
299,734 | preprocess | # Copyright 2019 Xilinx Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import collections
import copy
import datetime
import random
import string
import tensorflow as tf
from tensorflow_model_optimization.python.core.quantization.keras.vitis.utils.model_utils import (
is_quantize_layer, is_layer_wrapper, is_subclass_layer, is_subclass_model,
get_sub_layers_dict, set_sub_layer_weights, show_sub_layers_tree)
from tensorflow_model_optimization.python.core.quantization.keras.vitis.layers.subclass_layers import keras_layers_wrapper
from tensorflow_model_optimization.python.core.quantization.keras.vitis.layers.subclass_layers import subclass_layers_replacer
from .replacements import Replacement
class SubclassReplacer(Replacement):
"""Replace the subclass with a quantizable one."""
def __init__(self):
"""These parameters are used for replacer."""
self.config = None
self.replacer = subclass_layers_replacer.SubclassLayersReplacer()
def _replace_subclass_layer(self, layer, parent):
"""Replace subclass layer with a quantizable one."""
if parent is not None:
pass
if not is_subclass_layer(layer):
return None
qlayer = self.replacer.apply(layer)
if not qlayer is None:
set_sub_layer_weights(layer, qlayer)
return qlayer
def _recreate_subclass_model(self, model):
"""Recreate a new quantizable model."""
if not is_subclass_model(model):
return None
qmodel = self.replacer.apply(model)
if not qmodel is None:
set_sub_layer_weights(model, qmodel)
return qmodel
def METHOD_NAME(self, model, inputs=None):
"""Do some preprocess."""
# Get config from the model
self.replacer.get_model_config(model)
# Create quantizable model and build
self.replacer.build_model(model, inputs)
# Try to recreate a new quantizable model
return self._recreate_subclass_model(model)
def work(self, model, inputs):
"""Subclass replacer gets to work."""
show_sub_layers_tree(model, caption_str='original subclasses')
qmodel = self.METHOD_NAME(model, inputs)
if not qmodel is None:
show_sub_layers_tree(qmodel, caption_str='recreate subclasses')
return qmodel # recreate a new model, return directly
self.worker = self._replace_subclass_layer
self._traverse_sub_layers(model)
show_sub_layers_tree(model, caption_str='replaced subclasses')
return model
class SublayerWrapper(Replacement):
"""Wrap sublayers within a subclass layer for quantization."""
def __init__(self, quantize_registry=None, mode="QCB"):
"""These parameters are needed by Vitis Wrapper."""
self.quantize_registry = quantize_registry
self.mode = mode
self.wrapper = keras_layers_wrapper.KerasLayersWrapper(
self.quantize_registry, self.mode)
def _wrap_keras_layer(self, layer, parent=None):
"""Wrap keras layer for quantization."""
if parent is not None and not parent.__class__.__name__.startswith("Quant"):
return None
if is_subclass_layer(layer) or is_quantize_layer(layer):
return None
qlayer = self.wrapper.apply(layer)
if not qlayer is None:
pass
return qlayer
def _remove_keras_layer(self, layer, parent=None):
"""Remove keras layer by replacing it with identity layer."""
if parent is not None and not parent.__class__.__name__.startswith("Quant"):
return None
if is_subclass_layer(layer) or is_quantize_layer(layer):
return None
qlayer = self.wrapper.remove(layer)
if not qlayer is None:
pass
return qlayer
def _rename_sub_layers(self, subclass, hierarchy=''):
"""Rename leaf sublayers to avoid having duplicated names."""
def _rename_sublayer(layer, hierarchy):
if is_layer_wrapper(layer):
layer.layer._name = hierarchy + layer.layer.name
else:
layer._name = hierarchy + layer.name
sub_layers_dict = get_sub_layers_dict(subclass)
for value in sub_layers_dict.values():
if isinstance(value, list):
for layer in value:
if is_subclass_layer(layer):
self._rename_sub_layers(layer, hierarchy + layer.name + '/')
elif is_quantize_layer(layer):
_rename_sublayer(layer, hierarchy)
else:
layer = value
if is_subclass_layer(layer):
self._rename_sub_layers(layer, hierarchy + layer.name + '/')
elif is_quantize_layer(layer):
_rename_sublayer(layer, hierarchy)
def postprocess(self, model, inputs=None, **kwargs):
"""Do some postprocess."""
# Remove the keras dropout sublayers
if 'remove_dropout' in kwargs and kwargs['remove_dropout'] == True:
self.worker = self._remove_keras_layer
self._traverse_sub_layers(model)
# Execute a prediction to initialize quantize layers variables, otherwise
# we cannot copy weights from a quantized model to another, for example,
    # copying weights from a 'QCB' model to a 'QAT' model for initialization.
if inputs is not None:
model.predict(inputs, batch_size=1, steps=1)
# Compile the model to clear model.predict_function cache.
optimizer = 'adam' if (model.optimizer is None) else model.optimizer
model.compile(optimizer=optimizer, loss=None)
# Rename the quantized sublayers to discriminate them
if 'rename_sublayers' in kwargs and kwargs['rename_sublayers'] == True:
self._rename_sub_layers(model, model.name + '/')
return model
def work(self, model, inputs, **kwargs):
"""Sublayer wrapper gets to work."""
self.worker = self._wrap_keras_layer
self._traverse_sub_layers(model)
show_sub_layers_tree(model, caption_str='wrapped sublayers')
if 'remove_dropout' not in kwargs or kwargs['remove_dropout'] == True:
self.worker = self._remove_keras_layer
self._traverse_sub_layers(model)
show_sub_layers_tree(model, caption_str='removed sublayers')
self.postprocess(model, inputs=inputs, rename_sublayers=True)
show_sub_layers_tree(model, caption_str='renamed sublayers', show_leaf=True)
return model |
299,735 | verify cloud hypervisor integration tests | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from pathlib import Path
from typing import Any, Dict
from lisa import (
Environment,
Logger,
Node,
TestCaseMetadata,
TestSuite,
TestSuiteMetadata,
node_requirement,
schema,
search_space,
)
from lisa.operating_system import CBLMariner, Ubuntu
from lisa.testsuite import TestResult
from lisa.tools import Ls, Lscpu, Modprobe, Usermod
from lisa.util import SkippedException
from microsoft.testsuites.cloud_hypervisor.ch_tests_tool import CloudHypervisorTests
@TestSuiteMetadata(
area="cloud-hypervisor",
category="community",
description="""
This test suite is for executing the tests maintained in the
upstream cloud-hypervisor repo.
""",
)
class CloudHypervisorTestSuite(TestSuite):
def before_case(self, log: Logger, **kwargs: Any) -> None:
node = kwargs["node"]
if not isinstance(node.os, (CBLMariner, Ubuntu)):
raise SkippedException(
f"Cloud Hypervisor tests are not implemented in LISA for {node.os.name}"
)
node.tools[Modprobe].load("openvswitch")
self._ensure_virtualization_enabled(node)
variables: Dict[str, Any] = kwargs["variables"]
use_ms_clh_repo = variables.get("use_ms_clh_repo", None)
if use_ms_clh_repo == "yes":
self._set_ms_clh_param(variables)
def after_case(self, log: Logger, **kwargs: Any) -> None:
node = kwargs["node"]
node.tools[Modprobe].remove(["openvswitch"])
@TestCaseMetadata(
description="""
Runs cloud-hypervisor integration tests.
""",
priority=3,
timeout=CloudHypervisorTests.CASE_TIME_OUT,
requirement=node_requirement(
node=schema.NodeSpace(
core_count=search_space.IntRange(min=16),
memory_mb=search_space.IntRange(min=16 * 1024),
),
),
)
def METHOD_NAME(
self,
log: Logger,
node: Node,
environment: Environment,
log_path: Path,
result: TestResult,
variables: Dict[str, Any],
) -> None:
hypervisor = self._get_hypervisor_param(node)
ref = variables.get("cloudhypervisor_ref", "")
        # the variable below expects a comma-separated list of full test names
include_list, exclude_list = get_test_list(
variables, "ch_integration_tests_included", "ch_integration_tests_excluded"
)
node.tools[CloudHypervisorTests].run_tests(
result,
environment,
"integration",
hypervisor,
log_path,
ref,
include_list,
exclude_list,
)
@TestCaseMetadata(
description="""
Runs cloud-hypervisor live migration tests.
""",
priority=3,
timeout=CloudHypervisorTests.CASE_TIME_OUT,
requirement=node_requirement(
node=schema.NodeSpace(
core_count=search_space.IntRange(min=16),
memory_mb=search_space.IntRange(min=16 * 1024),
),
),
)
def verify_cloud_hypervisor_live_migration_tests(
self,
log: Logger,
node: Node,
environment: Environment,
log_path: Path,
result: TestResult,
variables: Dict[str, Any],
) -> None:
hypervisor = self._get_hypervisor_param(node)
ref = variables.get("cloudhypervisor_ref", "")
        # the variable below expects a comma-separated list of full test names
include_list, exclude_list = get_test_list(
variables,
"ch_live_migration_tests_included",
"ch_live_migration_tests_excluded",
)
node.tools[CloudHypervisorTests].run_tests(
result,
environment,
"integration-live-migration",
hypervisor,
log_path,
ref,
include_list,
exclude_list,
)
@TestCaseMetadata(
description="""
Runs cloud-hypervisor performance metrics tests.
""",
priority=3,
timeout=CloudHypervisorTests.CASE_TIME_OUT,
)
def verify_cloud_hypervisor_performance_metrics_tests(
self,
log: Logger,
node: Node,
environment: Environment,
log_path: Path,
result: TestResult,
variables: Dict[str, Any],
) -> None:
hypervisor = self._get_hypervisor_param(node)
ref = variables.get("cloudhypervisor_ref", "")
        # the variable below expects a comma-separated list of full test names
include_list, exclude_list = get_test_list(
variables,
"ch_perf_tests_included",
"ch_perf_tests_excluded",
)
subtest_timeout = variables.get("ch_perf_subtest_timeout", None)
node.tools[CloudHypervisorTests].run_metrics_tests(
result,
environment,
hypervisor,
log_path,
ref,
include_list,
exclude_list,
subtest_timeout,
)
def _ensure_virtualization_enabled(self, node: Node) -> None:
virtualization_enabled = node.tools[Lscpu].is_virtualization_enabled()
mshv_exists = node.tools[Ls].path_exists(path="/dev/mshv", sudo=True)
if not virtualization_enabled and not mshv_exists:
raise SkippedException("Virtualization is not enabled in hardware")
# add user to mshv group for access to /dev/mshv
if mshv_exists:
node.tools[Usermod].add_user_to_group("mshv", sudo=True)
def _get_hypervisor_param(self, node: Node) -> str:
kvm_exists = node.tools[Ls].path_exists(path="/dev/kvm", sudo=True)
if kvm_exists:
return "kvm"
mshv_exists = node.tools[Ls].path_exists(path="/dev/mshv", sudo=True)
if mshv_exists:
return "mshv"
return ""
def _set_ms_clh_param(self, variables: Dict[str, Any]) -> None:
# Get access token from testing infra to clone the repo
ms_access_token = variables.get("ms_access_token", None)
# Get URL for MS CLH repo
ms_clh_repo = variables.get("ms_clh_repo", None)
# Get URL for igvm-parser repo
ms_igvm_parser_repo = variables.get("ms_igvm_parser_repo", None)
# Get GUEST VM type, set default to NON-CVM
clh_guest_vm_type = variables.get("clh_guest_vm_type", "NON-CVM")
if not ms_access_token:
raise SkippedException("Access Token is needed while using MS-CLH")
if not ms_clh_repo:
raise SkippedException("CLH URL is needed while using MS-CLH")
if not ms_igvm_parser_repo:
raise SkippedException("IGVM-Parser URL is needed while using MS-CLH")
CloudHypervisorTests.use_ms_clh_repo = True
CloudHypervisorTests.ms_access_token = ms_access_token
CloudHypervisorTests.ms_clh_repo = ms_clh_repo
CloudHypervisorTests.ms_igvm_parser_repo = ms_igvm_parser_repo
CloudHypervisorTests.clh_guest_vm_type = clh_guest_vm_type
def get_test_list(variables: Dict[str, Any], var1: str, var2: str) -> Any:
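    # Split two comma-separated test-name variables into lists, e.g.
    # "test_a,test_b" -> ["test_a", "test_b"]; an unset or empty variable
    # yields None for the corresponding list.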
tests_raw = variables.get(var1, "")
test_list1 = tests_raw.split(",") if tests_raw else None
tests_raw = variables.get(var2, "")
test_list2 = tests_raw.split(",") if tests_raw else None
return test_list1, test_list2 |
299,736 | generate api | #!/usr/bin/env python3
import os
import argparse
import genapi
from genapi import \
TypeApi, GlobalVarApi, FunctionApi, BoolValuesApi
import numpy_api
# use annotated api when running under cpychecker
h_template = r"""
#if defined(_MULTIARRAYMODULE) || defined(WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE)
typedef struct {
PyObject_HEAD
npy_bool obval;
} PyBoolScalarObject;
extern NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type;
extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type;
extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];
%s
#else
#if defined(PY_ARRAY_UNIQUE_SYMBOL)
#define PyArray_API PY_ARRAY_UNIQUE_SYMBOL
#endif
#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY)
extern void **PyArray_API;
#else
#if defined(PY_ARRAY_UNIQUE_SYMBOL)
void **PyArray_API;
#else
static void **PyArray_API=NULL;
#endif
#endif
%s
#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT)
static int
_import_array(void)
{
int st;
PyObject *numpy = PyImport_ImportModule("numpy.core._multiarray_umath");
PyObject *c_api = NULL;
if (numpy == NULL) {
return -1;
}
c_api = PyObject_GetAttrString(numpy, "_ARRAY_API");
Py_DECREF(numpy);
if (c_api == NULL) {
PyErr_SetString(PyExc_AttributeError, "_ARRAY_API not found");
return -1;
}
if (!PyCapsule_CheckExact(c_api)) {
PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCapsule object");
Py_DECREF(c_api);
return -1;
}
PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL);
Py_DECREF(c_api);
if (PyArray_API == NULL) {
PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is NULL pointer");
return -1;
}
/*
* Perform runtime check of C API version. As of now NumPy 2.0 is ABI
* backwards compatible (in the exposed feature subset!) for all practical
* purposes.
*/
if (NPY_VERSION < PyArray_GetNDArrayCVersion()) {
PyErr_Format(PyExc_RuntimeError, "module compiled against "\
"ABI version 0x%%x but this version of numpy is 0x%%x", \
(int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion());
return -1;
}
if (NPY_FEATURE_VERSION > PyArray_GetNDArrayCFeatureVersion()) {
PyErr_Format(PyExc_RuntimeError, "module compiled against "\
"API version 0x%%x but this version of numpy is 0x%%x . "\
"Check the section C-API incompatibility at the "\
"Troubleshooting ImportError section at "\
"https://numpy.org/devdocs/user/troubleshooting-importerror.html"\
"#c-api-incompatibility "\
"for indications on how to solve this problem .", \
(int) NPY_FEATURE_VERSION, (int) PyArray_GetNDArrayCFeatureVersion());
return -1;
}
/*
* Perform runtime check of endianness and check it matches the one set by
* the headers (npy_endian.h) as a safeguard
*/
st = PyArray_GetEndianness();
if (st == NPY_CPU_UNKNOWN_ENDIAN) {
PyErr_SetString(PyExc_RuntimeError,
"FATAL: module compiled as unknown endian");
return -1;
}
#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN
if (st != NPY_CPU_BIG) {
PyErr_SetString(PyExc_RuntimeError,
"FATAL: module compiled as big endian, but "
"detected different endianness at runtime");
return -1;
}
#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN
if (st != NPY_CPU_LITTLE) {
PyErr_SetString(PyExc_RuntimeError,
"FATAL: module compiled as little endian, but "
"detected different endianness at runtime");
return -1;
}
#endif
return 0;
}
#define import_array() {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return NULL; } }
#define import_array1(ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return ret; } }
#define import_array2(msg, ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, msg); return ret; } }
#endif
#endif
"""
c_template = r"""
/* These pointers will be stored in the C-object for use in other
extension modules
*/
void *PyArray_API[] = {
%s
};
"""
def METHOD_NAME(output_dir, force=False):
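    # Generate __multiarray_api.h and __multiarray_api.c in output_dir,
    # regenerating them only when the API definitions are newer than the
    # targets (or when force=True).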
basename = 'multiarray_api'
h_file = os.path.join(output_dir, '__%s.h' % basename)
c_file = os.path.join(output_dir, '__%s.c' % basename)
targets = (h_file, c_file)
sources = numpy_api.multiarray_api
if (not force and not genapi.should_rebuild(targets, [numpy_api.__file__, __file__])):
return targets
else:
do_generate_api(targets, sources)
return targets
def do_generate_api(targets, sources):
header_file = targets[0]
c_file = targets[1]
global_vars = sources[0]
scalar_bool_values = sources[1]
types_api = sources[2]
multiarray_funcs = sources[3]
multiarray_api = sources[:]
module_list = []
extension_list = []
init_list = []
# Check multiarray api indexes
multiarray_api_index = genapi.merge_api_dicts(multiarray_api)
genapi.check_api_dict(multiarray_api_index)
numpyapi_list = genapi.get_api_functions('NUMPY_API',
multiarray_funcs)
# Create dict name -> *Api instance
api_name = 'PyArray_API'
multiarray_api_dict = {}
for f in numpyapi_list:
name = f.name
index = multiarray_funcs[name][0]
annotations = multiarray_funcs[name][1:]
multiarray_api_dict[f.name] = FunctionApi(f.name, index, annotations,
f.return_type,
f.args, api_name)
for name, val in global_vars.items():
index, type = val
multiarray_api_dict[name] = GlobalVarApi(name, index, type, api_name)
for name, val in scalar_bool_values.items():
index = val[0]
multiarray_api_dict[name] = BoolValuesApi(name, index, api_name)
for name, val in types_api.items():
index = val[0]
internal_type = None if len(val) == 1 else val[1]
multiarray_api_dict[name] = TypeApi(
name, index, 'PyTypeObject', api_name, internal_type)
if len(multiarray_api_dict) != len(multiarray_api_index):
keys_dict = set(multiarray_api_dict.keys())
keys_index = set(multiarray_api_index.keys())
raise AssertionError(
"Multiarray API size mismatch - "
"index has extra keys {}, dict has extra keys {}"
.format(keys_index - keys_dict, keys_dict - keys_index)
)
extension_list = []
for name, index in genapi.order_dict(multiarray_api_index):
api_item = multiarray_api_dict[name]
# In NumPy 2.0 the API may have holes (which may be filled again)
# in that case, add `NULL` to fill it.
while len(init_list) < api_item.index:
init_list.append(" NULL")
extension_list.append(api_item.define_from_array_api_string())
init_list.append(api_item.array_api_define())
module_list.append(api_item.internal_define())
# Write to header
s = h_template % ('\n'.join(module_list), '\n'.join(extension_list))
genapi.write_file(header_file, s)
# Write to c-code
s = c_template % ',\n'.join(init_list)
genapi.write_file(c_file, s)
return targets
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"-o",
"--outdir",
type=str,
help="Path to the output directory"
)
parser.add_argument(
"-i",
"--ignore",
type=str,
help="An ignored input - may be useful to add a "
"dependency between custom targets"
)
args = parser.parse_args()
outdir_abs = os.path.join(os.getcwd(), args.outdir)
METHOD_NAME(outdir_abs)
if __name__ == "__main__":
main() |
299,737 | send | #!/usr/bin/env python3
# Copyright (C) 2018 Simon Brummer <simon.brummer@posteo.de>
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import sys
import os
import re
import socket
import random
import testrunner
class Runner:
def __init__(self, timeout, echo=False, skip=False):
self.timeout = timeout
self.echo = echo
self.skip = skip
def __call__(self, fn):
if self.skip:
print('\n- "{}": SKIPPED'.format(fn.__name__), end='')
return 0
res = -1
try:
res = testrunner.run(fn, self.timeout, self.echo)
finally:
if res == 0:
print('- "{}": SUCCESS'.format(fn.__name__), end='')
else:
print('- "{}": FAILED'.format(fn.__name__), end='')
return res
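# Illustrative usage sketch (not part of the original file; the test name is
# hypothetical): a Runner instance is applied as a decorator, so the test
# function runs through testrunner at definition time and the decorated name
# is rebound to the resulting return code:
#
#     @Runner(timeout=10)
#     def test_connection(child):
#         ...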
class _HostTcpNode:
def __init__(self):
self.opened = False
self.sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.interface = self._get_interface()
self.address = self._get_ip_address(self.interface)
def METHOD_NAME(self, payload_to_send):
self.sock.METHOD_NAME(payload_to_send.encode('utf-8'))
def receive(self, sent_payload):
total_bytes = len(sent_payload)
assert self.sock.recv(total_bytes, socket.MSG_WAITALL).decode('utf-8') == sent_payload
def close(self):
self.sock.close()
self.opened = False
def _get_interface(self):
# Check if given tap device is part of a network bridge
# if so use bridged interface instead of given tap device
tap = os.environ["TAPDEV"]
result = os.popen('bridge link show dev {}'.format(tap))
bridge = re.search('master (.*) state', result.read())
return bridge.group(1).strip() if bridge else tap
def _get_ip_address(self, interface):
result = os.popen('ip addr show dev ' + interface + ' scope link')
return re.search('inet6 (.*)/64', result.read()).group(1).strip()
class HostTcpServer(_HostTcpNode):
def __init__(self, listen_port):
super().__init__()
self.listening = False
self.listen_sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
self.listen_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.listen_port = listen_port
def __enter__(self):
if not self.listening:
self.listen()
return self
def __exit__(self, _1, _2, _3):
if self.listening:
self.stop_listen()
def listen(self):
self.listen_sock.bind(('::', self.listen_port))
self.listen_sock.listen(1)
self.listening = True
def accept(self):
self.sock, _ = self.listen_sock.accept()
def stop_listen(self):
self.listen_sock.close()
self.listening = False
class HostTcpClient(_HostTcpNode):
def __init__(self, target):
super().__init__()
self.target_addr = str(target.address)
self.target_port = str(target.listen_port)
def __enter__(self):
if not self.opened:
self.open()
return self
def __exit__(self, _1, _2, _3):
if self.opened:
self.close()
def open(self):
addrinfo = socket.getaddrinfo(
self.target_addr + '%' + self.interface,
self.target_port,
type=socket.SOCK_STREAM
)
self.sock.connect(addrinfo[0][-1])
self.opened = True
class _SockTcpNode:
def __init__(self, child):
self.child = child
self.connected = False
self.interface = self._get_interface()
self.address = self._get_ip_address()
def write(self, payload):
self.child.sendline('sock_tcp_write {}'.format(str(payload)))
self.child.expect_exact('sock_tcp_write: sent {}'.format(len(payload)))
def read(self, payload, timeout_ms=0):
total_bytes = str(len(payload))
self.child.sendline('sock_tcp_read {} {}'.format(total_bytes, timeout_ms))
self.child.expect_exact('sock_tcp_read: received {} {}'.format(
total_bytes, payload)
)
def disconnect(self):
self.child.sendline('sock_tcp_disconnect')
self.child.expect_exact('sock_tcp_disconnect: returns')
self.connected = False
def get_local(self):
self.child.sendline('sock_tcp_get_local')
self.child.expect_exact('sock_tcp_get_local: returns 0')
def get_remote(self):
self.child.sendline('sock_tcp_get_remote')
self.child.expect_exact('sock_tcp_get_remote: returns 0')
def _get_interface(self):
self.child.sendline('ifconfig')
self.child.expect(r'Iface\s+(\d+)\s')
return self.child.match.group(1).strip()
def _get_ip_address(self):
self.child.sendline('ifconfig')
self.child.expect(r'(fe80:[0-9a-f:]+)\s')
return self.child.match.group(1).strip()
class SockTcpServer(_SockTcpNode):
def __init__(self, child, listen_port, listen_addr='::'):
super().__init__(child)
self.listening = False
self.listen_port = str(listen_port)
self.listen_addr = str(listen_addr)
def __enter__(self):
if not self.listening:
self.listen()
return self
def __exit__(self, _1, _2, _3):
if self.listening:
self.stop_listen()
def listen(self):
self.child.sendline('sock_tcp_listen [{}]:{}'.format(
self.listen_addr, self.listen_port)
)
self.child.expect_exact('sock_tcp_listen: returns 0')
self.listening = True
def accept(self, timeout_ms):
self.child.sendline('sock_tcp_accept {}'.format(str(timeout_ms)))
self.child.expect_exact('sock_tcp_accept: returns 0')
self.opened = True
def stop_listen(self):
self.child.sendline('sock_tcp_stop_listen')
self.child.expect_exact('sock_tcp_stop_listen: returns')
self.listening = False
def queue_get_local(self):
self.child.sendline('sock_tcp_queue_get_local')
self.child.expect_exact('sock_tcp_queue_get_local: returns 0')
class SockTcpClient(_SockTcpNode):
def __init__(self, child, target, local_port=0):
super().__init__(child)
self.target_addr = target.address + '%' + self.interface
self.target_port = str(target.listen_port)
self.local_port = local_port
def __enter__(self):
if not self.connected:
self.connect()
return self
def __exit__(self, _1, _2, _3):
if self.connected:
self.disconnect()
def connect(self):
self.child.sendline('sock_tcp_connect [{}]:{} {}'.format(
self.target_addr, self.target_port, self.local_port)
)
self.child.expect_exact('sock_tcp_connect: returns 0')
self.connected = True
def generate_port_number():
return random.randint(1024, 65535)
def sudo_guard(uses_scapy=False):
sudo_required = uses_scapy or (os.environ.get("BOARD", "") != "native")
if sudo_required and os.geteuid() != 0:
print("\x1b[1;31mThis test requires root privileges.\n"
"It uses `./dist/tools/ethos/start_networking.sh` as term" +
(" and it's constructing and sending Ethernet frames."
if uses_scapy else "") + "\x1b[0m\n",
file=sys.stderr)
sys.exit(1) |
299,738 | get content md5 | import weakref
from base64 import urlsafe_b64encode, b64encode
from collections import deque
from datetime import datetime
from gzip import GzipFile
import hashlib
import os
import re
from io import RawIOBase
from corehq.blobs.exceptions import BadName, GzipStreamError
SAFENAME = re.compile("^[a-z0-9_./{}-]+$", re.IGNORECASE)
class GzipStream:
"""Wrapper for a file like object that compresses the data as it is read
Adapted from https://stackoverflow.com/a/31566082
"""
CHUNK_SIZE = 4096
def __init__(self, fileobj):
self._input = fileobj
self._buf = _IoBuffer()
self._gzip = GzipFile(None, mode='wb', fileobj=self._buf)
self._content_length = 0
@property
def content_length(self):
"""Size of uncompressed data
        Can only be accessed once the stream has been fully read.
"""
if not self._gzip.closed or self._content_length is None:
raise GzipStreamError("cannot read length before full stream")
return self._content_length
def read(self, size=-1):
while size < 0 or len(self._buf) < size:
chunk = self._input.read(self.CHUNK_SIZE)
if not chunk:
self._gzip.close()
break
self._content_length += len(chunk)
self._gzip.write(chunk)
return self._buf.read(size)
def close(self):
if not self._gzip.closed:
self._content_length = None
self._input.close()
self._gzip.close()
self._buf.close()
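# Illustrative usage sketch (not part of the original module; `fileobj` stands
# for any binary file-like object): read() returns gzip-compressed bytes and
# content_length reports the uncompressed size once the wrapped stream has
# been read to the end.
#
#     stream = GzipStream(fileobj)
#     compressed = stream.read()
#     uncompressed_size = stream.content_length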
class _IoBuffer:
def __init__(self):
self.buffer = deque()
self.size = 0
def __len__(self):
return self.size
def write(self, data):
self.buffer.append(data)
self.size += len(data)
def read(self, size=-1):
if size < 0:
size = self.size
ret_list = []
while size > 0 and self.buffer:
s = self.buffer.popleft()
size -= len(s)
ret_list.append(s)
if size < 0:
ret_list[-1], remainder = ret_list[-1][:size], ret_list[-1][size:]
self.buffer.appendleft(remainder)
ret = b''.join(ret_list)
self.size -= len(ret)
return ret
def flush(self):
pass
def close(self):
self.buffer = None
self.size = 0
class document_method(object):
"""Document method
A document method is a twist between a static method and an instance
method. It can be called as a normal instance method, in which case
the first argument (`self`) is an instance of the method's class
type, or it can be called like a static method:
Document.method(obj, other, args)
in which case the first argument is passed as `self` and need not
be an instance of `Document`.
"""
def __init__(self, func):
self.func = func
def __get__(self, obj, owner):
if obj is None:
return self.func
return self.func.__get__(obj, owner)
class classproperty(object):
"""https://stackoverflow.com/a/5192374/10840"""
def __init__(self, func):
self.func = func
def __get__(self, obj, owner):
return self.func(owner)
def random_url_id(nbytes):
"""Get a random URL-safe ID string
:param nbytes: Number of random bytes to include in the ID.
:returns: A URL-safe string.
"""
return urlsafe_b64encode(os.urandom(nbytes)).decode('ascii').rstrip('=')
def check_safe_key(key):
"""Perform some basic checks on a potential blob key
This method makes a best-effort attempt to verify that the key is
safe for all blob db backends. It will not necessarily detect all
unsafe keys.
:raises: BadName if key is unsafe.
"""
if (key.startswith(("/", ".")) or
"/../" in key or
key.endswith("/..") or
not SAFENAME.match(key)):
raise BadName("unsafe key: %r" % key)
def _utcnow():
return datetime.utcnow()
def METHOD_NAME(fileobj):
"""Get Content-MD5 value
All content will be read from the current position to the end of the
file. The file will be left open with its seek position at the end
of the file.
:param fileobj: A file-like object.
:returns: RFC-1864-compliant Content-MD5 header value.
"""
md5 = hashlib.md5()
for chunk in iter(lambda: fileobj.read(1024 * 1024), b''):
md5.update(chunk)
return b64encode(md5.digest()).decode('ascii')
def set_max_connections(num_workers):
"""Set max connections for urllib3
The default is 10. When using something like gevent to process
    multiple S3 connections concurrently, it is necessary to set max
connections equal to the number of workers to avoid
`WARNING Connection pool is full, discarding connection: ...`
This must be called before `get_blob_db()` is called.
See botocore.config.Config max_pool_connections
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
"""
from django.conf import settings
from corehq.blobs import _db
def update_config(name):
config = getattr(settings, name)["config"]
config["max_pool_connections"] = num_workers
assert not _db, "get_blob_db() has been called"
for name in ["S3_BLOB_DB_SETTINGS", "OLD_S3_BLOB_DB_SETTINGS"]:
if getattr(settings, name, False):
update_config(name)
class BlobStream(RawIOBase):
"""Wrapper around the raw stream with additional properties for convenient access:
* blob_key
* content_length
* compressed_length (will be None if blob is not compressed)
"""
def __init__(self, stream, blob_db, blob_key, content_length, compressed_length):
self._obj = stream
self._blob_db = weakref.ref(blob_db)
self.blob_key = blob_key
self.content_length = content_length
self.compressed_length = compressed_length
def readable(self):
return True
def read(self, *args, **kw):
return self._obj.read(*args, **kw)
read1 = read
def write(self, *args, **kw):
raise IOError
def tell(self):
tell = getattr(self._obj, 'tell', None)
if tell is not None:
return tell()
return self._obj._amount_read
def seek(self, offset, from_what=os.SEEK_SET):
if from_what != os.SEEK_SET:
raise ValueError("seek mode not supported")
pos = self.tell()
if offset != pos:
raise ValueError("seek not supported")
return pos
def close(self):
self._obj.close()
return super(BlobStream, self).close()
def __getattr__(self, name):
return getattr(self._obj, name)
@property
def blob_db(self):
return self._blob_db()
def get_content_size(fileobj, chunks_sent):
"""
:param fileobj: content object written to the backend
:param chunks_sent: list of chunk sizes sent
:return: tuple(uncompressed_size, compressed_size or None)
"""
if isinstance(fileobj, GzipStream):
return fileobj.content_length, sum(chunks_sent)
return sum(chunks_sent), None |
299,739 | start payment | import logging
from django.http import HttpResponseBadRequest, HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
import requests
from rest_framework import serializers
from rest_framework.exceptions import ParseError
from openforms.api.fields import PrimaryKeyRelatedAsChoicesField
from openforms.config.data import Entry
from openforms.logging import logevent
from openforms.utils.mixins import JsonSchemaSerializerMixin
from ...base import BasePlugin
from ...constants import PAYMENT_STATUS_FINAL, UserAction
from ...contrib.ogone.client import OgoneClient
from ...contrib.ogone.constants import OgoneStatus
from ...contrib.ogone.exceptions import InvalidSignature
from ...contrib.ogone.models import OgoneMerchant
from ...models import SubmissionPayment
from ...registry import register
logger = logging.getLogger(__name__)
class OgoneOptionsSerializer(JsonSchemaSerializerMixin, serializers.Serializer):
merchant_id = PrimaryKeyRelatedAsChoicesField(
queryset=OgoneMerchant.objects.all(),
required=True,
help_text=_("Merchant to use"),
)
RETURN_ACTION_PARAM = "action"
@register("ogone-legacy")
class OgoneLegacyPaymentPlugin(BasePlugin):
verbose_name = _("Ogone legacy")
configuration_options = OgoneOptionsSerializer
def METHOD_NAME(self, request, payment: SubmissionPayment):
# decimal to cents
amount_cents = int((payment.amount * 100).to_integral_exact())
merchant = get_object_or_404(
OgoneMerchant, id=payment.plugin_options["merchant_id"]
)
client = OgoneClient(merchant)
return_url = self.get_return_url(request, payment)
description = (
f"{_('Submission')}: {payment.submission.public_registration_reference}"
)
info = client.get_payment_info(
payment.public_order_id,
amount_cents,
return_url,
RETURN_ACTION_PARAM,
description,
)
return info
def handle_return(self, request, payment: SubmissionPayment):
action = request.query_params.get(RETURN_ACTION_PARAM)
merchant = get_object_or_404(
OgoneMerchant, id=payment.plugin_options["merchant_id"]
)
client = OgoneClient(merchant)
try:
params = client.get_validated_params(request.query_params)
except InvalidSignature as e:
logger.warning(f"invalid SHASIGN for payment {payment}")
logevent.payment_flow_failure(payment, self, e)
return HttpResponseBadRequest("bad shasign")
self.apply_status(payment, params.STATUS)
form_url = payment.submission.cleaned_form_url
form_url.args.update(
{
"of_payment_status": payment.status,
"of_payment_id": str(payment.uuid),
"of_payment_action": action or UserAction.unknown,
}
)
return HttpResponseRedirect(form_url.url)
def handle_webhook(self, request):
# unvalidated data
order_id = case_insensitive_get(request.data, "orderID")
if not order_id:
# we use ParseError in this method because serializers.ValidationError triggers exception serializers
raise ParseError("missing orderID")
payment = get_object_or_404(SubmissionPayment, public_order_id=order_id)
merchant = get_object_or_404(
OgoneMerchant, id=payment.plugin_options["merchant_id"]
)
client = OgoneClient(merchant)
try:
params = client.get_validated_params(request.data)
except InvalidSignature as e:
logger.warning(f"invalid SHASIGN for payment {payment}")
logevent.payment_flow_failure(payment, self, e)
# see note about ParseError above
raise ParseError("bad shasign")
self.apply_status(payment, params.STATUS)
return payment
def apply_status(self, payment, ogone_status) -> None:
if payment.status in PAYMENT_STATUS_FINAL:
            # status is already final; nothing more to do (may indicate a race condition)
return
new_status = OgoneStatus.as_payment_status(ogone_status)
# run this query as atomic update()
qs = SubmissionPayment.objects.filter(id=payment.id)
qs = qs.exclude(status__in=PAYMENT_STATUS_FINAL)
qs = qs.exclude(status=new_status)
res = qs.update(status=new_status)
if res > 0:
payment.refresh_from_db()
@classmethod
def iter_config_checks(cls):
for merchant in OgoneMerchant.objects.all():
yield cls.check_merchant(merchant)
@classmethod
def check_merchant(cls, merchant):
entry = Entry(
name=f"{cls.verbose_name}: {merchant.label}",
actions=[
(
_("Configuration"),
reverse(
"admin:payments_ogone_ogonemerchant_change",
args=(merchant.pk,),
),
)
],
)
try:
response = requests.get(merchant.endpoint)
response.raise_for_status()
except Exception as e:
entry.status = False
entry.error = str(e)
else:
entry.status = True
return entry
def case_insensitive_get(mapping, key, default=None):
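    # Look up `key` ignoring case; request payloads may spell it e.g.
    # "orderID", "ORDERID" or "orderid".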
if key in mapping:
return mapping[key]
for k in mapping:
if k.upper() == key.upper():
return mapping[k]
return default |
299,740 | test index buffer 16bit | """
Low level tests for OpenGL 3.3 wrappers.
"""
import array
import struct
import pytest
from arcade.gl import BufferDescription
from arcade.gl.vertex_array import VertexArray
from arcade.gl.program import Program
def test_buffer_description(ctx):
# TODO: components > 4
# TODO: padding
buffer = ctx.buffer(reserve=4 * 8)
attribute_names = ['in_vert', 'in_uv']
descr = BufferDescription(
buffer,
'2f 2f',
attribute_names,
)
assert descr.num_vertices == 2
assert descr.buffer == buffer
assert descr.attributes == attribute_names
assert descr.instanced is False
assert len(descr.formats) == 2
assert descr.stride == 16
# Buffer parameter not a buffer
with pytest.raises(ValueError):
BufferDescription("test", "2f", ["pos"])
def test_geometry(ctx):
"""Test vertex_array"""
program = ctx.load_program(
vertex_shader=':resources:shaders/shapes/line/line_vertex_shader_vs.glsl',
fragment_shader=':resources:shaders/shapes/line/line_vertex_shader_fs.glsl',
)
num_vertices = 100
content = [
BufferDescription(
ctx.buffer(reserve=4 * num_vertices),
'4f1',
['in_color'],
normalized=['in_color'],
),
BufferDescription(
ctx.buffer(reserve=8 * num_vertices),
'2f',
['in_vert']
),
]
geo = ctx.geometry(content)
assert geo.ctx == ctx
assert geo.num_vertices == num_vertices
assert geo.index_buffer is None
geo.render(program, mode=ctx.TRIANGLES)
geo.render(program, mode=ctx.POINTS)
geo.render(program, mode=ctx.LINES)
vao = geo.instance(program)
assert isinstance(vao, VertexArray)
assert isinstance(vao.program, Program)
assert vao.num_vertices == -1
assert vao.ibo is None
geo.flush()
def test_padding(ctx):
    ctx.geometry([BufferDescription(
ctx.buffer(reserve=4 * 7 * 10),
'2f 3x4 2f',
('in_pos', 'in_vel'),
)])
def test_transform(ctx):
"""Test basic transform"""
program = ctx.program(vertex_shader="""
#version 330
out float value;
void main() {
value = float(gl_VertexID);
}
"""
)
buffer = ctx.buffer(reserve=4 * 5)
vao = ctx.geometry()
vao.transform(program, buffer, vertices=5)
assert struct.unpack('5f', buffer.read()) == (0.0, 1.0, 2.0, 3.0, 4.0)
def test_index_buffer_32bit(ctx):
"""Create a vao with 32 bit index buffer"""
program = ctx.program(
vertex_shader="""
#version 330
in vec2 in_position;
void main() {
gl_Position = vec4(in_position, 0.0, 1.0);
}
""",
fragment_shader="""
#version 330
out vec4 color;
void main() {
color = vec4(1.0);
}
""",
)
vertex_buffer = ctx.buffer(data=array.array('f', [0.0] * 2 * 4))
ibo = ctx.buffer(data=array.array('I', [0, 1, 2, 0, 1, 3]))
vao = ctx.geometry(
[
BufferDescription(vertex_buffer, "2f", ["in_position"]),
],
index_buffer=ibo,
index_element_size=4,
mode=ctx.TRIANGLES,
)
assert vao.ctx == ctx
assert vao.num_vertices == 6
assert vao.index_buffer == ibo
vao.render(program)
def METHOD_NAME(ctx):
"""Create a vao with 16 bit index buffer"""
program = ctx.program(
vertex_shader="""
#version 330
in vec2 in_position;
void main() {
gl_Position = vec4(in_position, 0.0, 1.0);
}
""",
fragment_shader="""
#version 330
out vec4 color;
void main() {
color = vec4(1.0);
}
""",
)
vertex_buffer = ctx.buffer(data=array.array('f', [0.0] * 2 * 4))
ibo = ctx.buffer(data=array.array('H', [0, 1, 2, 0, 1, 3]))
vao = ctx.geometry(
[
BufferDescription(vertex_buffer, "2f", ["in_position"]),
],
index_buffer=ibo,
index_element_size=2,
mode=ctx.TRIANGLES,
)
assert vao.ctx == ctx
assert vao.num_vertices == 6
assert vao.index_buffer == ibo
vao.render(program)
def test_index_buffer_8bit(ctx):
"""Create a vao with 8 bit index buffer"""
program = ctx.program(
vertex_shader="""
#version 330
in vec2 in_position;
void main() {
gl_Position = vec4(in_position, 0.0, 1.0);
}
""",
fragment_shader="""
#version 330
out vec4 color;
void main() {
color = vec4(1.0);
}
""",
)
vertex_buffer = ctx.buffer(data=array.array('f', [0.0] * 2 * 4))
ibo = ctx.buffer(data=array.array('B', [0, 1, 2, 0, 1, 3]))
vao = ctx.geometry(
[
BufferDescription(vertex_buffer, "2f", ["in_position"]),
],
index_buffer=ibo,
index_element_size=1,
mode=ctx.TRIANGLES,
)
assert vao.ctx == ctx
assert vao.num_vertices == 6
assert vao.index_buffer == ibo
vao.render(program)
def test_index_buffer_incorrect_type_size(ctx):
"""Attempt to use an illegal index buffer type size"""
for size in [0, 3, 5]:
with pytest.raises(ValueError):
ctx.geometry(
[
BufferDescription(ctx.buffer(reserve=16), "2f", ["in_position"]),
],
index_buffer=ctx.buffer(reserve=16),
index_element_size=size,
)
def test_incomplete_geometry(ctx):
ctx.geometry()
def test_appending_extra_buffer_description(ctx):
"""Attempt to append a BufferDescription with the same attribute name"""
with pytest.raises(ValueError):
geometry = ctx.geometry(
[
BufferDescription(ctx.buffer(reserve=16), "2f", ['in_position'])
]
)
geometry.append_buffer_description(BufferDescription(ctx.buffer(reserve=16), '4f', ['in_position']))
def test_vertex_array_wrong_attrib_mapping(ctx):
"""Attempt to map an float buffer into an int attribute"""
geometry =ctx.geometry(
[BufferDescription(ctx.buffer(reserve=16), '2f', ['in_pos'])]
)
program = ctx.program(
vertex_shader="""
#version 330
in ivec2 in_pos;
out ivec2 out_pos;
void main() {
out_pos = in_pos;
}
""",
)
with pytest.raises(ValueError, match="GL_INT"):
geometry.transform(program, ctx.buffer(reserve=16)) |
299,741 | accept | import socket
import sys
from _typeshed import ReadableBuffer
from builtins import type as Type # alias to avoid name clashes with property named "type"
from collections.abc import Iterable
from types import TracebackType
from typing import Any, BinaryIO, NoReturn, overload
from typing_extensions import TypeAlias
# These are based in socket, maybe move them out into _typeshed.pyi or such
_Address: TypeAlias = socket._Address
_RetAddress: TypeAlias = Any
_WriteBuffer: TypeAlias = bytearray | memoryview
_CMSG: TypeAlias = tuple[int, int, bytes]
class TransportSocket:
def __init__(self, sock: socket.socket) -> None: ...
@property
def family(self) -> int: ...
@property
def type(self) -> int: ...
@property
def proto(self) -> int: ...
def __getstate__(self) -> NoReturn: ...
def fileno(self) -> int: ...
def dup(self) -> socket.socket: ...
def get_inheritable(self) -> bool: ...
def shutdown(self, how: int) -> None: ...
@overload
def getsockopt(self, level: int, optname: int) -> int: ...
@overload
def getsockopt(self, level: int, optname: int, buflen: int) -> bytes: ...
@overload
def setsockopt(self, level: int, optname: int, value: int | ReadableBuffer) -> None: ...
@overload
def setsockopt(self, level: int, optname: int, value: None, optlen: int) -> None: ...
def getpeername(self) -> _RetAddress: ...
def getsockname(self) -> _RetAddress: ...
def getsockbyname(self) -> NoReturn: ... # This method doesn't exist on socket, yet is passed through?
def settimeout(self, value: float | None) -> None: ...
def gettimeout(self) -> float | None: ...
def setblocking(self, flag: bool) -> None: ...
if sys.version_info < (3, 11):
def _na(self, what: str) -> None: ...
def METHOD_NAME(self) -> tuple[socket.socket, _RetAddress]: ...
def connect(self, address: _Address) -> None: ...
def connect_ex(self, address: _Address) -> int: ...
def bind(self, address: _Address) -> None: ...
if sys.platform == "win32":
def ioctl(self, control: int, option: int | tuple[int, int, int] | bool) -> None: ...
else:
def ioctl(self, control: int, option: int | tuple[int, int, int] | bool) -> NoReturn: ...
def listen(self, __backlog: int = ...) -> None: ...
def makefile(self) -> BinaryIO: ...
def sendfile(self, file: BinaryIO, offset: int = ..., count: int | None = ...) -> int: ...
def close(self) -> None: ...
def detach(self) -> int: ...
if sys.platform == "linux":
def sendmsg_afalg(
self, msg: Iterable[ReadableBuffer] = ..., *, op: int, iv: Any = ..., assoclen: int = ..., flags: int = ...
) -> int: ...
else:
def sendmsg_afalg(
self, msg: Iterable[ReadableBuffer] = ..., *, op: int, iv: Any = ..., assoclen: int = ..., flags: int = ...
) -> NoReturn: ...
def sendmsg(
self,
__buffers: Iterable[ReadableBuffer],
__ancdata: Iterable[_CMSG] = ...,
__flags: int = ...,
__address: _Address = ...,
) -> int: ...
@overload
def sendto(self, data: ReadableBuffer, address: _Address) -> int: ...
@overload
def sendto(self, data: ReadableBuffer, flags: int, address: _Address) -> int: ...
def send(self, data: ReadableBuffer, flags: int = ...) -> int: ...
def sendall(self, data: ReadableBuffer, flags: int = ...) -> None: ...
def set_inheritable(self, inheritable: bool) -> None: ...
if sys.platform == "win32":
def share(self, process_id: int) -> bytes: ...
else:
def share(self, process_id: int) -> NoReturn: ...
def recv_into(self, buffer: _WriteBuffer, nbytes: int = ..., flags: int = ...) -> int: ...
def recvfrom_into(self, buffer: _WriteBuffer, nbytes: int = ..., flags: int = ...) -> tuple[int, _RetAddress]: ...
def recvmsg_into(
self, __buffers: Iterable[_WriteBuffer], __ancbufsize: int = ..., __flags: int = ...
) -> tuple[int, list[_CMSG], int, Any]: ...
def recvmsg(self, __bufsize: int, __ancbufsize: int = ..., __flags: int = ...) -> tuple[bytes, list[_CMSG], int, Any]: ...
def recvfrom(self, bufsize: int, flags: int = ...) -> tuple[bytes, _RetAddress]: ...
def recv(self, bufsize: int, flags: int = ...) -> bytes: ...
def __enter__(self) -> socket.socket: ...
def __exit__(
self, exc_type: Type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None
) -> None: ... |
299,742 | make icon | ######################################################################################################################
# Copyright (C) 2017-2022 Spine project consortium
# This file is part of Spine Toolbox.
# Spine Toolbox is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option)
# any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General
# Public License for more details. You should have received a copy of the GNU Lesser General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
######################################################################################################################
"""
Contains base classes for project items and item factories.
"""
class ProjectItemFactory:
"""Class for project item factories."""
@staticmethod
def item_class():
"""
Returns the project item's class.
Returns:
type: item's class
"""
raise NotImplementedError()
@staticmethod
def is_deprecated():
"""Queries if item is deprecated.
Returns:
bool: True if item is deprecated, False otherwise
"""
return False
@staticmethod
def icon():
"""
Returns the icon resource path.
Returns:
str
"""
raise NotImplementedError()
@staticmethod
def icon_color():
"""
Returns the icon color.
Returns:
QColor: icon's color
"""
raise NotImplementedError()
@staticmethod
def make_add_item_widget(toolbox, x, y, specification):
"""
Returns an appropriate Add project item widget.
Args:
toolbox (ToolboxUI): the main window
x, y (int): Icon coordinates
specification (ProjectItemSpecification): item's specification
Returns:
QWidget
"""
raise NotImplementedError()
@staticmethod
def METHOD_NAME(toolbox):
"""
Returns a ProjectItemIcon to use with given toolbox, for given project item.
Args:
toolbox (ToolboxUI)
Returns:
ProjectItemIcon: item's icon
"""
raise NotImplementedError()
@staticmethod
def make_item(name, item_dict, toolbox, project):
"""
Returns a project item constructed from the given ``item_dict``.
Args:
name (str): item's name
item_dict (dict): serialized project item
toolbox (ToolboxUI): Toolbox main window
project (SpineToolboxProject): the project the item belongs to
Returns:
ProjectItem
"""
raise NotImplementedError()
@staticmethod
def make_properties_widget(toolbox):
"""
Creates the item's properties tab widget.
Returns:
QWidget: item's properties tab widget
"""
raise NotImplementedError()
@staticmethod
def make_specification_menu(parent, index):
"""
Creates item specification's context menu.
Subclasses that do not support specifications can still raise :class:`NotImplementedError`.
Args:
parent (QWidget): menu's parent widget
index (QModelIndex): an index from specification model
Returns:
ItemSpecificationMenu: specification's context menu
"""
raise NotImplementedError()
@staticmethod
def make_specification_editor(toolbox, specification=None, item=None, **kwargs):
"""
Creates the item's specification widget.
Subclasses that do not support specifications can still raise :class:`NotImplementedError`.
Args:
toolbox (ToolboxUI): Toolbox main window
specification (ProjectItemSpecification, optional): a specification to show in the widget or None for
a fresh start
item (ProjectItem, optional): a project item. If the specification is accepted, it is also set for this item
**kwargs: parameters passed to the specification widget
Returns:
QWidget: item's specification widget
"""
raise NotImplementedError()
@staticmethod
def repair_specification(toolbox, specification):
"""Called right after a spec is added to the project. Finds if there's something wrong with the spec
and proposes actions to fix it with help from toolbox.
Args:
toolbox (ToolboxUI): Toolbox main window
specification (ProjectItemSpecification): a specification to check
""" |
299,743 | test date modify and query | # This file is part of beets.
# Copyright 2016, Thomas Scholtes.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
import time
from datetime import datetime
import unittest
from test.helper import TestHelper
from confuse import ConfigValueError
class TypesPluginTest(unittest.TestCase, TestHelper):
def setUp(self):
self.setup_beets()
self.load_plugins('types')
def tearDown(self):
self.unload_plugins()
self.teardown_beets()
def test_integer_modify_and_query(self):
self.config['types'] = {'myint': 'int'}
item = self.add_item(artist='aaa')
# Do not match unset values
out = self.list('myint:1..3')
self.assertEqual('', out)
self.modify('myint=2')
item.load()
self.assertEqual(item['myint'], 2)
# Match in range
out = self.list('myint:1..3')
self.assertIn('aaa', out)
def test_album_integer_modify_and_query(self):
self.config['types'] = {'myint': 'int'}
album = self.add_album(albumartist='aaa')
# Do not match unset values
out = self.list_album('myint:1..3')
self.assertEqual('', out)
self.modify('-a', 'myint=2')
album.load()
self.assertEqual(album['myint'], 2)
# Match in range
out = self.list_album('myint:1..3')
self.assertIn('aaa', out)
def test_float_modify_and_query(self):
self.config['types'] = {'myfloat': 'float'}
item = self.add_item(artist='aaa')
# Do not match unset values
out = self.list('myfloat:10..0')
self.assertEqual('', out)
self.modify('myfloat=-9.1')
item.load()
self.assertEqual(item['myfloat'], -9.1)
# Match in range
out = self.list('myfloat:-10..0')
self.assertIn('aaa', out)
def test_bool_modify_and_query(self):
self.config['types'] = {'mybool': 'bool'}
true = self.add_item(artist='true')
false = self.add_item(artist='false')
self.add_item(artist='unset')
# Do not match unset values
out = self.list('mybool:true, mybool:false')
self.assertEqual('', out)
# Set true
self.modify('mybool=1', 'artist:true')
true.load()
self.assertEqual(true['mybool'], True)
# Set false
self.modify('mybool=false', 'artist:false')
false.load()
self.assertEqual(false['mybool'], False)
# Query bools
out = self.list('mybool:true', '$artist $mybool')
self.assertEqual('true True', out)
out = self.list('mybool:false', '$artist $mybool')
# Dealing with unset fields?
# self.assertEqual('false False', out)
# out = self.list('mybool:', '$artist $mybool')
# self.assertIn('unset $mybool', out)
def METHOD_NAME(self):
self.config['types'] = {'mydate': 'date'}
# FIXME parsing should also work with default time format
self.config['time_format'] = '%Y-%m-%d'
old = self.add_item(artist='prince')
new = self.add_item(artist='britney')
# Do not match unset values
out = self.list('mydate:..2000')
self.assertEqual('', out)
self.modify('mydate=1999-01-01', 'artist:prince')
old.load()
self.assertEqual(old['mydate'], mktime(1999, 1, 1))
self.modify('mydate=1999-12-30', 'artist:britney')
new.load()
self.assertEqual(new['mydate'], mktime(1999, 12, 30))
# Match in range
out = self.list('mydate:..1999-07', '$artist $mydate')
self.assertEqual('prince 1999-01-01', out)
# FIXME some sort of timezone issue here
# out = self.list('mydate:1999-12-30', '$artist $mydate')
# self.assertEqual('britney 1999-12-30', out)
def test_unknown_type_error(self):
        self.config['types'] = {'flex': 'unknown type'}
with self.assertRaises(ConfigValueError):
self.run_command('ls')
def test_template_if_def(self):
# Tests for a subtle bug when using %ifdef in templates along with
# types that have truthy default values (e.g. '0', '0.0', 'False')
# https://github.com/beetbox/beets/issues/3852
self.config['types'] = {'playcount': 'int', 'rating': 'float',
'starred': 'bool'}
with_fields = self.add_item(artist='prince')
self.modify('playcount=10', 'artist=prince')
self.modify('rating=5.0', 'artist=prince')
self.modify('starred=yes', 'artist=prince')
with_fields.load()
without_fields = self.add_item(artist='britney')
int_template = '%ifdef{playcount,Play count: $playcount,Not played}'
self.assertEqual(with_fields.evaluate_template(int_template),
'Play count: 10')
self.assertEqual(without_fields.evaluate_template(int_template),
'Not played')
float_template = '%ifdef{rating,Rating: $rating,Not rated}'
self.assertEqual(with_fields.evaluate_template(float_template),
'Rating: 5.0')
self.assertEqual(without_fields.evaluate_template(float_template),
'Not rated')
bool_template = '%ifdef{starred,Starred: $starred,Not starred}'
self.assertIn(with_fields.evaluate_template(bool_template).lower(),
('starred: true', 'starred: yes', 'starred: y'))
self.assertEqual(without_fields.evaluate_template(bool_template),
'Not starred')
def modify(self, *args):
return self.run_with_output('modify', '--yes', '--nowrite',
'--nomove', *args)
def list(self, query, fmt='$artist - $album - $title'):
return self.run_with_output('ls', '-f', fmt, query).strip()
def list_album(self, query, fmt='$albumartist - $album - $title'):
return self.run_with_output('ls', '-a', '-f', fmt, query).strip()
def mktime(*args):
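    # Local-time POSIX timestamp for (year, month, day, ...) arguments.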
return time.mktime(datetime(*args).timetuple())
def suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite') |
299,744 | test iiif base | # -*- coding: utf-8 -*-
#
# Copyright (C) 2022 Universität Hamburg.
#
# Invenio-RDM-Records is free software; you can redistribute it and/or modify
# it under the terms of the MIT License; see LICENSE file for more details.
from io import BytesIO
from PIL import Image
from werkzeug.utils import secure_filename
def publish_record_with_images(
client, file_id, record, headers, restricted_files=False
):
"""A record with files."""
record["files"]["enabled"] = True
if restricted_files:
record["access"]["files"] = "restricted"
# Create a draft
res = client.post("/records", headers=headers, json=record)
id_ = res.json["id"]
# create a new image
res = client.post(
f"/records/{id_}/draft/files", headers=headers, json=[{"key": file_id}]
)
# Upload a file
image_file = BytesIO()
image = Image.new("RGBA", (1280, 1024), (255, 0, 0, 0))
image.save(image_file, "png")
image_file.seek(0)
res = client.put(
f"/records/{id_}/draft/files/{file_id}/content",
headers={"content-type": "application/octet-stream"},
data=image_file,
)
# Commit the file
res = client.post(f"/records/{id_}/draft/files/{file_id}/commit", headers=headers)
# Publish the record
res = client.post(f"/records/{id_}/draft/actions/publish", headers=headers)
return id_
def test_file_links_depending_on_file_extensions(
running_app, search_clear, client, uploader, headers, minimal_record
):
client = uploader.login(client)
file_id = "test_image.zip"
recid = publish_record_with_images(client, file_id, minimal_record, headers)
response = client.get(f"/records/{recid}/files/{file_id}")
assert "iiif_canvas" not in response.json["links"]
assert "iiif_base" not in response.json["links"]
assert "iiif_info" not in response.json["links"]
assert "iiif_api" not in response.json["links"]
file_id = "test_image.png"
recid = publish_record_with_images(client, file_id, minimal_record, headers)
response = client.get(f"/records/{recid}/files/{file_id}")
assert "iiif_canvas" in response.json["links"]
assert "iiif_base" in response.json["links"]
assert "iiif_info" in response.json["links"]
assert "iiif_api" in response.json["links"]
def METHOD_NAME(
running_app, search_clear, client, uploader, headers, minimal_record
):
client = uploader.login(client)
file_id = "test_image.png"
recid = publish_record_with_images(client, file_id, minimal_record, headers)
response = client.get(f"/iiif/record:{recid}:{file_id}")
assert response.status_code == 301
assert (
response.json["location"]
== f"https://127.0.0.1:5000/api/iiif/record:{recid}:{file_id}/info.json"
)
def test_iiif_info(
running_app, search_clear, client, uploader, headers, minimal_record
):
client = uploader.login(client)
file_id = "test_image.png"
recid = publish_record_with_images(client, file_id, minimal_record, headers)
response = client.get(f"/iiif/record:{recid}:{file_id}/info.json")
assert response.status_code == 200
assert response.json == {
"@context": "http://iiif.io/api/image/2/context.json",
"profile": ["http://iiif.io/api/image/2/level2.json"],
"protocol": "http://iiif.io/api/image",
"@id": f"https://127.0.0.1:5000/api/iiif/record:{recid}:{file_id}",
"tiles": [{"width": 256, "scaleFactors": [1, 2, 4, 8, 16, 32, 64]}],
"width": 1280,
"height": 1024,
}
def test_api_info_not_found(running_app, search_clear, client):
response = client.get(f"/iiif/record:1234-abcd:notfound.png/info.json")
assert response.status_code == 404
def test_iiif_base_restricted_files(
running_app,
search_clear,
client,
uploader,
headers,
minimal_record,
users,
):
client = uploader.login(client)
file_id = "test_image.png"
recid = publish_record_with_images(
client, file_id, minimal_record, headers, restricted_files=True
)
client = uploader.logout(client)
response = client.get(f"/iiif/record:{recid}:{file_id}")
assert response.status_code == 403
# Log in user and try again
client = uploader.login(client)
response = client.get(f"/iiif/record:{recid}:{file_id}")
assert response.status_code == 301
def test_iiif_info_restricted_files(
running_app,
search_clear,
client,
uploader,
headers,
minimal_record,
users,
):
client = uploader.login(client)
file_id = "test_image.png"
recid = publish_record_with_images(
client, file_id, minimal_record, headers, restricted_files=True
)
client = uploader.logout(client)
response = client.get(f"/iiif/record:{recid}:{file_id}/info.json")
assert response.status_code == 403
# Log in user and try again
client = uploader.login(client)
response = client.get(f"/iiif/record:{recid}:{file_id}/info.json")
assert response.status_code == 200
def test_iiif_image_api(
running_app, search_clear, client, uploader, headers, minimal_record
):
client = uploader.login(client)
file_id = "test_image.png"
recid = publish_record_with_images(client, file_id, minimal_record, headers)
# create a new image equal to the one in the record
tmp_file = BytesIO()
image = Image.new("RGBA", (1280, 1024), (255, 0, 0, 0))
image.save(tmp_file, "png")
tmp_file.seek(0)
response = client.get(f"/iiif/record:{recid}:{file_id}/full/full/0/default.png")
assert response.status_code == 200
assert response.data == tmp_file.getvalue()
response = client.get(
f"/iiif/record:{recid}:{file_id}/200,200,200,200/300,300/!50/color.pdf"
)
assert response.status_code == 200
default_name = secure_filename(
f"record:{recid}:{file_id}-200200200200-300300-color-50.pdf"
)
for dl, name in (
("", default_name),
("1", default_name),
("foo.pdf", "foo.pdf"),
):
response = client.get(
f"/iiif/record:{recid}:{file_id}/"
f"200,200,200,200/300,300/!50/color.pdf?dl={dl}"
)
assert response.status_code == 200
assert response.headers["Content-Disposition"] == f"attachment; filename={name}" |
299,745 | input schema fit | # Copyright 2019-2022 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from functools import wraps
import lale.docstrings
import lale.operators
logger = logging.getLogger(__name__)
# set the level to INFO so that messages from the LoggingObserver below are emitted
logger.setLevel(logging.INFO)
def observe(f):
@wraps(f)
def wrapper(self, *args, **kwds):
name = f.__name__
self.startObserving(name, *args, **kwds)
try:
ret = f(self, *args, **kwds)
self.endObserving(name, ret)
except BaseException as e:
self.failObserving(name, e)
raise
return ret
return wrapper
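# A method decorated with @observe reports to the configured observer through
# start_<method>, end_<method> and fail_<method> callbacks around the call.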
start_prefix = "start_"
end_prefix = "end_"
fail_prefix = "fail_"
class _ObservingImpl:
def __init__(self, op=None, observer=None):
if observer is not None and isinstance(observer, type):
# if we are given a class name, instantiate it
observer = observer()
self._hyperparams = {"op": op, "observer": observer}
def getOp(self):
return self._hyperparams["op"]
def getObserver(self):
return self._hyperparams["observer"]
def _observe(self, methodName, *args, **kwargs):
o = self.getObserver()
if o is not None:
m = getattr(o, methodName, None)
if m is not None:
m(self.getOp(), *args, **kwargs)
def startObserving(self, methodName, *args, **kwargs):
self._observe(f"{start_prefix}{methodName}", *args, **kwargs)
def endObserving(self, methodName, *args, **kwargs):
self._observe(f"{end_prefix}{methodName}", *args, **kwargs)
def failObserving(self, methodName, e: BaseException):
self._observe(f"{fail_prefix}{methodName}", e)
@observe
def transform(self, X, y=None):
ret = self.getOp().transform(X, y=y)
self.endObserving("transform", ret)
return ret
@observe
def transform_schema(self, s_X):
return self.getOp().transform_schema(s_X)
@observe
def METHOD_NAME(self):
return self.getOp().METHOD_NAME()
@observe
def predict(self, X, **predict_params):
return self.getOp().predict(X, **predict_params)
@observe
def predict_proba(self, X):
return self.getOp().predict_proba(X)
@observe
def fit(self, X, y=None, **fit_params):
self._hyperparams["op"] = self.getOp().fit(X, y=y, **fit_params)
return self
_hyperparams_schema = {
"description": "Hyperparameter schema for the identity Higher Order Operator, which wraps another operator and runs it as usual",
"allOf": [
{
"description": "This first object lists all constructor arguments with their types, but omits constraints for conditional hyperparameters",
"type": "object",
"additionalProperties": False,
"relevantToOptimizer": ["op"],
"properties": {
"op": {"laleType": "operator"},
"observer": {"laleType": "Any"},
},
}
],
}
# TODO: can we surface the base op input/output schema?
_input_fit_schema = {
"description": "Input data schema for training identity.",
"type": "object",
"required": ["X"],
"additionalProperties": False,
"properties": {"X": {}},
}
_input_predict_transform_schema = (
{ # TODO: separate predict vs. predict_proba vs. transform
"description": "Input data schema for transformations using identity.",
"type": "object",
"required": ["X", "y"],
"additionalProperties": False,
"properties": {"X": {}, "y": {}},
}
)
_output_schema = { # TODO: separate predict vs. predict_proba vs. transform
"description": "Output data schema for transformations using identity.",
"laleType": "Any",
}
_combined_schemas = {
"$schema": "http://json-schema.org/draft-04/schema#",
"description": """This should functionally be identical to the identity wrapper, except that it calls methods on the observer (if they exist) before and after calls to the underlying wrapper. This is similar to aspect-oriented programming. See also Tee, which provides a simpler method for observing/logging data.""",
"documentation_url": "https://lale.readthedocs.io/en/latest/modules/lale.lib.lale.identity.html",
"import_from": "lale.lib.lale",
"type": "object",
"tags": {"pre": [], "op": ["estimator", "transformer"], "post": []},
"properties": {
"hyperparams": _hyperparams_schema,
"input_fit": _input_fit_schema,
"input_predict": _input_predict_transform_schema,
"output_predict": _output_schema,
"input_predict_proba": _input_predict_transform_schema,
"output_predict_proba": _output_schema,
"input_transform": _input_predict_transform_schema,
"output_transform": _output_schema,
},
}
Observing = lale.operators.make_operator(_ObservingImpl, _combined_schemas)
lale.docstrings.set_docstrings(Observing)
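# Illustrative usage sketch (not part of the original module; the wrapped operator PCA and
# the data variables are placeholders/assumptions for the example only):
#
#     from lale.lib.sklearn import PCA
#     observed = Observing(op=PCA(), observer=LoggingObserver)
#     trained = observed.fit(X_train, y_train)    # LoggingObserver logs start_fit / end_fit
#     result = trained.transform(X_test)          # ... and start_transform / end_transform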
class LoggingObserver:
"""An observer that logs everything.
This is also useful for debugging, since you can set breakpoints here
"""
_indent: int
def __init__(self):
self._indent = 0
def __getattr__(self, prop: str):
if prop.startswith("_"):
raise AttributeError
if prop.startswith(start_prefix):
suffix = prop[len(start_prefix) :]
def startfun(*args, **kwargs):
if logger.isEnabledFor(logging.INFO):
s: str = " " * self._indent
s += f"[observing({suffix})->] "
s += ",".join((str(x) for x in args))
if len(args) > 0 and len(kwargs) > 0:
s += ", "
for k, v in kwargs.items():
s += f"{k}->{v}"
logger.info(s)
self._indent += 1
return startfun
elif prop.startswith(end_prefix):
suffix = prop[len(end_prefix) :]
def endfun(*args, **kwargs):
assert self._indent > 0
self._indent -= 1
if logger.isEnabledFor(logging.INFO):
s: str = " " * self._indent
s += f"[<-observed({suffix})] "
s += ",".join((str(x) for x in args))
for k, v in kwargs.items():
s += f"{k}->{v}"
logger.info(s)
return endfun
elif prop.startswith(fail_prefix):
suffix = prop[len(fail_prefix) :]
def failfun(*args, **kwargs):
assert self._indent > 0
self._indent -= 1
if logger.isEnabledFor(logging.INFO):
s: str = " " * self._indent
s += f"[!error!<-observed({suffix})] "
s += ",".join((str(x) for x in args))
for k, v in kwargs.items():
s += f"{k}->{v}"
logger.info(s)
return failfun
else:
logger.debug(f"trying to observe {prop}, which is not a start or stop")
return None |
299,746 | encode ir | import logging
import os
from typing import Optional, Tuple, List
from slither import Slither
from slither.core.declarations import (
Structure,
Enum,
SolidityVariableComposed,
SolidityVariable,
Function,
)
from slither.core.solidity_types import (
ElementaryType,
ArrayType,
MappingType,
UserDefinedType,
)
from slither.core.variables.local_variable import LocalVariable
from slither.core.variables.local_variable_init_from_tuple import LocalVariableInitFromTuple
from slither.core.variables.state_variable import StateVariable
from slither.slithir.operations import (
Assignment,
Index,
Member,
Length,
Binary,
Unary,
Condition,
NewArray,
NewStructure,
NewContract,
NewElementaryType,
SolidityCall,
Delete,
EventCall,
LibraryCall,
InternalDynamicCall,
HighLevelCall,
LowLevelCall,
TypeConversion,
Return,
Transfer,
Send,
Unpack,
InitArray,
InternalCall,
)
from slither.slithir.variables import (
TemporaryVariable,
TupleVariable,
Constant,
ReferenceVariable,
)
from .cache import load_cache
simil_logger = logging.getLogger("Slither-simil")
compiler_logger = logging.getLogger("CryticCompile")
compiler_logger.setLevel(logging.CRITICAL)
slither_logger = logging.getLogger("Slither")
slither_logger.setLevel(logging.CRITICAL)
def parse_target(target: Optional[str]) -> Tuple[Optional[str], Optional[str]]:
if target is None:
return None, None
parts = target.split(".")
if len(parts) == 1:
return None, parts[0]
if len(parts) == 2:
return parts[0], parts[1]
simil_logger.error("Invalid target. It should be 'function' or 'Contract.function'")
return None, None
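# Example behaviour (sketch; the contract/function names are hypothetical):
#   parse_target(None)               -> (None, None)
#   parse_target("transfer")         -> (None, "transfer")
#   parse_target("Token.transfer")   -> ("Token", "transfer")
#   parse_target("a.b.c")            -> logs an error and returns (None, None)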
def load_and_encode(infile: str, vmodel, ext=None, nsamples=None, **kwargs):
r = {}
if infile.endswith(".npz"):
r = load_cache(infile, nsamples=nsamples)
else:
contracts = load_contracts(infile, ext=ext, nsamples=nsamples)
for contract in contracts:
for x, ir in encode_contract(contract, **kwargs).items():
if ir != []:
y = " ".join(ir)
r[x] = vmodel.get_sentence_vector(y)
return r
def load_contracts(
dirname: str, ext: Optional[str] = None, nsamples: Optional[int] = None
) -> List[str]:
r = []
walk = list(os.walk(dirname))
for x, y, files in walk:
for f in files:
if ext is None or f.endswith(ext):
r.append(x + "/".join(y) + "/" + f)
if nsamples is None:
return r
# TODO: shuffle
return r[:nsamples]
def ntype(_type): # pylint: disable=too-many-branches
if isinstance(_type, ElementaryType):
_type = str(_type)
elif isinstance(_type, ArrayType):
if isinstance(_type.type, ElementaryType):
_type = str(_type)
else:
_type = "user_defined_array"
elif isinstance(_type, Structure):
_type = str(_type)
elif isinstance(_type, Enum):
_type = str(_type)
elif isinstance(_type, MappingType):
_type = str(_type)
elif isinstance(_type, UserDefinedType):
_type = "user_defined_type" # TODO: this could be Contract, Enum or Struct
else:
_type = str(_type)
_type = _type.replace(" memory", "")
_type = _type.replace(" storage ref", "")
if "struct" in _type:
return "struct"
if "enum" in _type:
return "enum"
if "tuple" in _type:
return "tuple"
if "contract" in _type:
return "contract"
if "mapping" in _type:
return "mapping"
return _type.replace(" ", "_")
def METHOD_NAME(ir): # pylint: disable=too-many-branches
# operations
if isinstance(ir, Assignment):
return f"({METHOD_NAME(ir.lvalue)}):=({METHOD_NAME(ir.rvalue)})"
if isinstance(ir, Index):
return f"index({ntype(ir.index_type)})"
if isinstance(ir, Member):
return "member" # .format(ntype(ir._type))
if isinstance(ir, Length):
return "length"
if isinstance(ir, Binary):
return f"binary({str(ir.type)})"
if isinstance(ir, Unary):
return f"unary({str(ir.type)})"
if isinstance(ir, Condition):
return f"condition({METHOD_NAME(ir.value)})"
if isinstance(ir, NewStructure):
return "new_structure"
if isinstance(ir, NewContract):
return "new_contract"
if isinstance(ir, NewArray):
return f"new_array({ntype(ir.array_type)})"
if isinstance(ir, NewElementaryType):
return f"new_elementary({ntype(ir.type)})"
if isinstance(ir, Delete):
return f"delete({METHOD_NAME(ir.lvalue)},{METHOD_NAME(ir.variable)})"
if isinstance(ir, SolidityCall):
return f"solidity_call({ir.function.full_name})"
if isinstance(ir, InternalCall):
return f"internal_call({ntype(ir.type_call)})"
if isinstance(ir, EventCall): # is this useful?
return "event"
if isinstance(ir, LibraryCall):
return "library_call"
if isinstance(ir, InternalDynamicCall):
return "internal_dynamic_call"
if isinstance(ir, HighLevelCall): # TODO: improve
return "high_level_call"
if isinstance(ir, LowLevelCall): # TODO: improve
return "low_level_call"
if isinstance(ir, TypeConversion):
return f"type_conversion({ntype(ir.type)})"
if isinstance(ir, Return): # this can be improved using values
return "return" # .format(ntype(ir.type))
if isinstance(ir, Transfer):
return f"transfer({METHOD_NAME(ir.call_value)})"
if isinstance(ir, Send):
return f"send({METHOD_NAME(ir.call_value)})"
if isinstance(ir, Unpack): # TODO: improve
return "unpack"
if isinstance(ir, InitArray): # TODO: improve
return "init_array"
if isinstance(ir, Function): # TODO: investigate this
return "function_solc"
# variables
if isinstance(ir, Constant):
return f"constant({ntype(ir.type)})"
if isinstance(ir, SolidityVariableComposed):
return f"solidity_variable_composed({ir.name})"
if isinstance(ir, SolidityVariable):
return f"solidity_variable{ir.name}"
if isinstance(ir, TemporaryVariable):
return "temporary_variable"
if isinstance(ir, ReferenceVariable):
return f"reference({ntype(ir.type)})"
if isinstance(ir, LocalVariable):
return f"local_solc_variable({ir.location})"
if isinstance(ir, StateVariable):
return f"state_solc_variable({ntype(ir.type)})"
if isinstance(ir, LocalVariableInitFromTuple):
return "local_variable_init_tuple"
if isinstance(ir, TupleVariable):
return "tuple_variable"
# default
simil_logger.error("%s is missing encoding!", type(ir))
return ""
def encode_contract(cfilename, **kwargs):
r = {}
# Init slither
try:
slither = Slither(cfilename, **kwargs)
except Exception: # pylint: disable=broad-except
simil_logger.error("Compilation failed for %s using %s", cfilename, kwargs["solc"])
return r
# Iterate over all the contracts
for contract in slither.contracts:
# Iterate over all the functions
for function in contract.functions_declared:
if function.nodes == [] or function.is_constructor_variables:
continue
x = (cfilename, contract.name, function.name)
r[x] = []
# Iterate over the nodes of the function
for node in function.nodes:
# Print the Solidity expression of the nodes
# And the SlithIR operations
if node.expression:
for ir in node.irs:
r[x].append(METHOD_NAME(ir))
return r |
299,747 | drop database | import argparse
import sys
import pymapd
from heavy.thrift.ttypes import TDashboard
def getOptions(args=None):
parser = argparse.ArgumentParser(description='Basic benchmark for system tables')
parser.add_argument('--host', help='HEAVY.AI server address', default='localhost')
parser.add_argument('--port', help='HEAVY.AI server port', default='6273')
parser.add_argument('--user', help='HEAVY.AI user name', default='admin')
parser.add_argument('--password', help='HEAVY.AI password', default='HyperInteractive')
parser.add_argument('--database_count', help='Number of databases to create', type=int, default=1)
parser.add_argument('--table_count', help='Number of tables to create', type=int, default=100)
parser.add_argument('--dashboard_count', help='Number of dashboards to create', type=int, default=100)
parser.add_argument('--user_count', help='Number of users to create', type=int, default=10)
parser.add_argument('--role_count', help='Number of roles to create', type=int, default=5)
parser.add_argument('--skip_object_creation', help='Skip creation of database objects', default=False)
parser.add_argument('--skip_object_deletion', help='Skip deletion of database objects', default=False)
parser.add_argument('--tag', help='Tag for test run')
return parser.parse_args(args)
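# Example invocation (sketch; the script file name, host and counts are placeholders):
#   python3 benchmark_system_tables.py --host localhost --database_count 2 --table_count 10 \
#       --dashboard_count 5 --user_count 4 --role_count 2 --tag nightly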
class HeavyAICon:
def __init__(self, user, password, db_name, host):
self.con = pymapd.connect(user=user, password=password, dbname=db_name, host=host)
self.cursor = self.con.cursor()
def query(self, sql):
return self.cursor.execute(sql)
def create_dashboard(self, dashboard_name):
dashboard = TDashboard(dashboard_name = dashboard_name)
return self.con.create_dashboard(dashboard)
def create_database(heavyai_con, db_id):
heavyai_con.query(f"CREATE DATABASE test_db_{db_id}")
def create_insert_and_select_from_table(heavyai_con, table_id):
heavyai_con.query(f"CREATE TABLE test_table_{table_id} (a INTEGER, b TEXT)")
for i in range(10):
heavyai_con.query(f"INSERT INTO test_table_{table_id} VALUES ({i}, 'abc_{i}')")
heavyai_con.query(f"SELECT AVG(a) FROM test_table_{table_id}")
def create_dashboard(heavyai_con, dashboard_id):
heavyai_con.create_dashboard(f"test_dashboard_{dashboard_id}")
def create_user(heavyai_con, user_id):
heavyai_con.query(f"CREATE USER test_user_{user_id} (password = 'test_pass')")
def create_role(heavyai_con, role_id):
heavyai_con.query(f"CREATE ROLE test_role_{role_id}")
def assign_role(heavyai_con, user_id, role_id):
heavyai_con.query(f"GRANT test_role_{role_id} TO test_user_{user_id}")
def grant_role_table_select(heavyai_con, role_id, db_id):
heavyai_con.query(f"GRANT SELECT ON DATABASE test_db_{db_id} TO test_role_{role_id}")
def grant_user_table_select(heavyai_con, user_id, db_id):
heavyai_con.query(f"GRANT SELECT ON DATABASE test_db_{db_id} TO test_user_{user_id}")
def METHOD_NAME(heavyai_con, db_id):
heavyai_con.query(f"DROP DATABASE test_db_{db_id}")
def drop_user(heavyai_con, user_id):
heavyai_con.query(f"DROP USER test_user_{user_id}")
def drop_role(heavyai_con, role_id):
heavyai_con.query(f"DROP ROLE test_role_{role_id}")
def query_and_time_system_table(heavyai_con, table_name):
query = f"SELECT COUNT(*) FROM {table_name}"
result = heavyai_con.query(query)
print(f"Query: {query}, Execution time: {result._result.execution_time_ms}ms")
query = f"SELECT * FROM {table_name} LIMIT 10"
result = heavyai_con.query(query)
print(f"Query: {query}, Execution time: {result._result.execution_time_ms}ms")
def get_connection(options, db_name):
return HeavyAICon(options.user, options.password, db_name, options.host)
def main(argv):
options = getOptions(argv)
default_db = "heavyai"
heavyai_con = get_connection(options, default_db)
if not options.skip_object_creation:
print("Creating database objects")
for db_id in range(options.database_count):
create_database(heavyai_con, db_id)
db_name = f"test_db_{db_id}"
heavyai_con = get_connection(options, db_name)
for table_id in range(options.table_count):
create_insert_and_select_from_table(heavyai_con, table_id)
print(f"{options.table_count} tables created for {db_name}")
for dashboard_id in range(options.dashboard_count):
create_dashboard(heavyai_con, dashboard_id)
print(f"{options.dashboard_count} dashboards created for {db_name}")
print(f"{options.database_count} databases created")
heavyai_con = get_connection(options, default_db)
for user_id in range(options.user_count):
create_user(heavyai_con, user_id)
print(f"{options.user_count} users created")
for role_id in range(options.role_count):
create_role(heavyai_con, role_id)
print(f"{options.role_count} roles created")
half_roles = int(options.role_count / 2)
for user_id in range(options.user_count):
for role_id in range(half_roles):
assign_role(heavyai_con, user_id, role_id)
if options.database_count > 0:
db_id = 0
for role_id in range(half_roles):
grant_role_table_select(heavyai_con, role_id + half_roles, db_id)
half_users = int(options.user_count / 2)
for user_id in range(half_users):
grant_user_table_select(heavyai_con, user_id + half_users, db_id)
system_tables = ["tables",
"dashboards",
"databases",
"users",
"permissions",
"role_assignments",
"roles",
"storage_details",
"memory_details",
"memory_summary"]
heavyai_con = get_connection(options, "information_schema")
print("Executing system table queries")
for table_name in system_tables:
query_and_time_system_table(heavyai_con, table_name)
if not options.skip_object_deletion:
heavyai_con = get_connection(options, default_db)
print("Dropping databases")
for db_id in range(options.database_count):
METHOD_NAME(heavyai_con, db_id)
print("Dropping users")
for user_id in range(options.user_count):
drop_user(heavyai_con, user_id)
print("Dropping roles")
for role_id in range(options.role_count):
drop_role(heavyai_con, role_id)
if __name__ == "__main__":
main(sys.argv[1:]) |
299,748 | test join to location no cache if | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import pytest
from datetime import datetime, timezone
import numpy as np
from flowmachine.features import SubscriberLocations
from flowmachine.core import JoinToLocation, location_joined_query, make_spatial_unit
from flowmachine.core.errors import InvalidSpatialUnitError
def test_join_to_location_column_names(exemplar_spatial_unit_param):
"""Test that JoinToLocation's column_names property is accurate."""
if not exemplar_spatial_unit_param.has_geography:
pytest.skip("JoinToLocation does not accept CellSpatialUnit objects")
table = SubscriberLocations(
"2016-01-05", "2016-01-07", spatial_unit=make_spatial_unit("cell")
)
joined = JoinToLocation(table, spatial_unit=exemplar_spatial_unit_param)
assert joined.head(0).columns.tolist() == joined.column_names
def METHOD_NAME():
with pytest.raises(NotImplementedError):
table = SubscriberLocations(
"2016-01-05", "2016-01-07", spatial_unit=make_spatial_unit("admin", level=3)
)
joined = JoinToLocation(table, spatial_unit=make_spatial_unit("admin", level=3))
joined.fully_qualified_table_name
def test_join_to_location_raises_value_error():
"""
Test that JoinToLocation raises an InvalidSpatialUnitError if spatial_unit
does not have geography information.
"""
with pytest.raises(InvalidSpatialUnitError):
table = SubscriberLocations(
"2016-01-05", "2016-01-07", spatial_unit=make_spatial_unit("cell")
)
joined = JoinToLocation(table, spatial_unit=make_spatial_unit("cell"))
moving_sites = [
"N0tfEoYN",
"yPANTB8f",
"DJHTx6HD",
"aqt4bzQU",
"PYTxb352",
"hRU1j2q5",
"UJE4phmX",
]
# Date at which the cells move
move_date = datetime(2016, 1, 6, tzinfo=timezone.utc)
def test_join_with_versioned_cells(get_dataframe, get_length):
"""
Test that flowmachine.JoinToLocation can fetch the cell version.
"""
ul = SubscriberLocations(
"2016-01-05", "2016-01-07", spatial_unit=make_spatial_unit("cell")
)
df = get_dataframe(
JoinToLocation(ul, spatial_unit=make_spatial_unit("versioned-cell"))
)
# As our database is complete we should not drop any rows
assert len(df) == get_length(ul)
# These should all be version zero, these are the towers before the changeover date, or those that
# have not moved.
should_be_version_zero = df[
(df.time <= move_date) | (~df.location_id.isin(moving_sites))
]
# These should all be one, they are the ones after the change over time that have moved.
should_be_version_one = df[
(df.time > move_date) & (df.location_id.isin(moving_sites))
]
assert (should_be_version_zero.version == 0).all()
assert (should_be_version_one.version == 1).all()
def test_join_with_lon_lat(get_dataframe):
"""
Test that flowmachine.JoinToLocation can get the lon-lat values of the cell
"""
ul = SubscriberLocations(
"2016-01-05", "2016-01-07", spatial_unit=make_spatial_unit("cell")
)
df = get_dataframe(JoinToLocation(ul, spatial_unit=make_spatial_unit("lon-lat")))
expected_cols = sorted(["subscriber", "time", "location_id", "lon", "lat"])
assert sorted(df.columns) == expected_cols
# Pick out one cell that moves location and assert that the
# lon-lats are right
focal_cell = "dJb0Wd"
lon1, lat1 = (83.09284486, 27.648837800000003)
lon2, lat2 = (83.25769074752517, 27.661443318109132)
post_move = df[(df.time > move_date) & (df["location_id"] == focal_cell)]
pre_move = df[(df.time < move_date) & (df["location_id"] == focal_cell)]
# And check them all one-by-one
assert np.isclose(pre_move.lon, lon1).all()
assert np.isclose(pre_move.lat, lat1).all()
assert np.isclose(post_move.lon, lon2).all()
assert np.isclose(post_move.lat, lat2).all()
def test_join_with_polygon(get_dataframe, get_length):
"""
Test that flowmachine.JoinToLocation can get the (arbitrary) polygon
of each cell.
"""
ul = SubscriberLocations(
"2016-01-05", "2016-01-07", spatial_unit=make_spatial_unit("cell")
)
j = JoinToLocation(
ul,
spatial_unit=make_spatial_unit(
"polygon",
region_id_column_name="admin3pcod",
geom_table="geography.admin3",
geom_column="geom",
),
)
df = get_dataframe(j)
expected_cols = sorted(["admin3pcod", "location_id", "subscriber", "time"])
assert sorted(df.columns) == expected_cols
assert len(df) == get_length(ul)
def test_join_to_admin(get_dataframe, get_length):
"""
Test that flowmachine.JoinToLocation can join to a admin region.
"""
ul = SubscriberLocations(
"2016-01-05", "2016-01-07", spatial_unit=make_spatial_unit("cell")
)
df = get_dataframe(
JoinToLocation(ul, spatial_unit=make_spatial_unit("admin", level=3))
)
assert len(df) == get_length(ul)
expected_cols = sorted(["subscriber", "time", "location_id", "pcod"])
assert sorted(df.columns) == expected_cols
def test_join_to_grid(get_dataframe, get_length):
"""
Test that we can join to a grid square
"""
ul = SubscriberLocations(
"2016-01-05", "2016-01-07", spatial_unit=make_spatial_unit("cell")
)
df = get_dataframe(
JoinToLocation(ul, spatial_unit=make_spatial_unit("grid", size=50))
)
assert len(df) == get_length(ul)
def test_location_joined_query_return_type(exemplar_spatial_unit_param):
"""
Test that location_joined_query(query, spatial_unit) returns a
JoinToLocation object when spatial_unit != CellSpatialUnit(), and returns
query when spatial_unit == CellSpatialUnit().
"""
table = SubscriberLocations(
"2016-01-05", "2016-01-07", spatial_unit=make_spatial_unit("cell")
)
joined = location_joined_query(table, spatial_unit=exemplar_spatial_unit_param)
if make_spatial_unit("cell") == exemplar_spatial_unit_param:
assert joined is table
else:
assert isinstance(joined, JoinToLocation)
def test_location_joined_query_raises_error():
"""
Test that location_joined_query raises an error if spatial_unit is not a
SpatialUnit object.
"""
table = SubscriberLocations(
"2016-01-05", "2016-01-07", spatial_unit=make_spatial_unit("cell")
)
with pytest.raises(InvalidSpatialUnitError):
location_joined_query(table, spatial_unit="foo") |
299,749 | fill extents | from typing import Any, Optional
class Context:
def __init__(self, target: Any) -> None: ...
def get_target(self): ...
def save(self) -> None: ...
def restore(self) -> None: ...
def __enter__(self): ...
def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> None: ...
def push_group(self) -> None: ...
def push_group_with_content(self, content: Any) -> None: ...
def pop_group(self): ...
def pop_group_to_source(self) -> None: ...
def get_group_target(self): ...
def set_source_rgba(
self, red: float, green: float, blue: float, alpha: float = ...
) -> None: ...
def set_source_rgb(self, red: float, green: float, blue: float) -> None: ...
def set_source_surface(self, surface: Any, x: int = ..., y: int = ...) -> None: ...
def set_source(self, source: Any) -> None: ...
def get_source(self): ...
def set_antialias(self, antialias: Any) -> None: ...
def get_antialias(self): ...
def set_dash(self, dashes: Any, offset: int = ...) -> None: ...
def get_dash(self): ...
def get_dash_count(self): ...
def set_fill_rule(self, fill_rule: Any) -> None: ...
def get_fill_rule(self): ...
def set_line_cap(self, line_cap: Any) -> None: ...
def get_line_cap(self): ...
def set_line_join(self, line_join: Any) -> None: ...
def get_line_join(self): ...
def set_line_width(self, width: Any) -> None: ...
def get_line_width(self): ...
def set_miter_limit(self, limit: Any) -> None: ...
def get_miter_limit(self): ...
def set_operator(self, operator: Any) -> None: ...
def get_operator(self): ...
def set_tolerance(self, tolerance: Any) -> None: ...
def get_tolerance(self): ...
def translate(self, tx: Any, ty: Any) -> None: ...
def scale(self, sx: Any, sy: Optional[Any] = ...) -> None: ...
def rotate(self, radians: Any) -> None: ...
def transform(self, matrix: Any) -> None: ...
def set_matrix(self, matrix: Any) -> None: ...
def get_matrix(self): ...
def identity_matrix(self) -> None: ...
def user_to_device(self, x: Any, y: Any): ...
def user_to_device_distance(self, dx: Any, dy: Any): ...
def device_to_user(self, x: Any, y: Any): ...
def device_to_user_distance(self, dx: Any, dy: Any): ...
def has_current_point(self): ...
def get_current_point(self): ...
def new_path(self) -> None: ...
def new_sub_path(self) -> None: ...
def move_to(self, x: Any, y: Any) -> None: ...
def rel_move_to(self, dx: Any, dy: Any) -> None: ...
def line_to(self, x: Any, y: Any) -> None: ...
def rel_line_to(self, dx: Any, dy: Any) -> None: ...
def rectangle(self, x: Any, y: Any, width: Any, height: Any) -> None: ...
def arc(self, xc: Any, yc: Any, radius: Any, angle1: Any, angle2: Any) -> None: ...
def arc_negative(self, xc: Any, yc: Any, radius: Any, angle1: Any, angle2: Any) -> None: ...
def curve_to(self, x1: Any, y1: Any, x2: Any, y2: Any, x3: Any, y3: Any) -> None: ...
def rel_curve_to(
self, dx1: Any, dy1: Any, dx2: Any, dy2: Any, dx3: Any, dy3: Any
) -> None: ...
def text_path(self, text: Any) -> None: ...
def glyph_path(self, glyphs: Any) -> None: ...
def close_path(self) -> None: ...
def copy_path(self): ...
def copy_path_flat(self): ...
def append_path(self, path: Any) -> None: ...
def path_extents(self): ...
def paint(self) -> None: ...
def paint_with_alpha(self, alpha: Any) -> None: ...
def mask(self, pattern: Any) -> None: ...
def mask_surface(self, surface: Any, surface_x: int = ..., surface_y: int = ...) -> None: ...
def fill(self) -> None: ...
def fill_preserve(self) -> None: ...
def METHOD_NAME(self): ...
def in_fill(self, x: Any, y: Any): ...
def stroke(self) -> None: ...
def stroke_preserve(self) -> None: ...
def stroke_extents(self): ...
def in_stroke(self, x: Any, y: Any): ...
def clip(self) -> None: ...
def clip_preserve(self) -> None: ...
def clip_extents(self): ...
def copy_clip_rectangle_list(self): ...
def in_clip(self, x: Any, y: Any): ...
def reset_clip(self) -> None: ...
def select_font_face(
self, family: str = ..., slant: Any = ..., weight: Any = ...
) -> None: ...
def set_font_face(self, font_face: Any) -> None: ...
def get_font_face(self): ...
def set_font_size(self, size: Any) -> None: ...
def set_font_matrix(self, matrix: Any) -> None: ...
def get_font_matrix(self): ...
def set_font_options(self, font_options: Any) -> None: ...
def get_font_options(self): ...
def set_scaled_font(self, scaled_font: Any) -> None: ...
def get_scaled_font(self): ...
def font_extents(self): ...
def text_extents(self, text: Any): ...
def glyph_extents(self, glyphs: Any): ...
def show_text(self, text: Any) -> None: ...
def show_glyphs(self, glyphs: Any) -> None: ...
def show_text_glyphs(
self, text: Any, glyphs: Any, clusters: Any, cluster_flags: int = ...
) -> None: ...
def show_page(self) -> None: ...
def copy_page(self) -> None: ...
def tag_begin(self, tag_name: Any, attributes: Optional[Any] = ...) -> None: ...
def tag_end(self, tag_name: Any) -> None: ... |
299,750 | test aws session | """Tests of fiona.env"""
import os
import sys
from unittest import mock
import boto3
import pytest
import fiona
from fiona import _env
from fiona.env import getenv, hasenv, ensure_env, ensure_env_with_credentials
from fiona.errors import FionaDeprecationWarning
from fiona.session import AWSSession, GSSession
def test_nested_credentials(monkeypatch):
"""Check that rasterio.open() doesn't wipe out surrounding credentials"""
@ensure_env_with_credentials
def fake_opener(path):
return fiona.env.getenv()
with fiona.env.Env(
session=AWSSession(aws_access_key_id="foo", aws_secret_access_key="bar")
):
assert fiona.env.getenv()["AWS_ACCESS_KEY_ID"] == "foo"
assert fiona.env.getenv()["AWS_SECRET_ACCESS_KEY"] == "bar"
monkeypatch.setenv("AWS_ACCESS_KEY_ID", "lol")
monkeypatch.setenv("AWS_SECRET_ACCESS_KEY", "wut")
gdalenv = fake_opener("s3://foo/bar")
assert gdalenv["AWS_ACCESS_KEY_ID"] == "foo"
assert gdalenv["AWS_SECRET_ACCESS_KEY"] == "bar"
def test_ensure_env_decorator(gdalenv):
@ensure_env
def f():
return getenv()["FIONA_ENV"]
assert f() is True
def test_ensure_env_decorator_sets_gdal_data(gdalenv, monkeypatch):
"""fiona.env.ensure_env finds GDAL from environment"""
@ensure_env
def f():
return getenv()["GDAL_DATA"]
monkeypatch.setenv("GDAL_DATA", "/lol/wut")
assert f() == "/lol/wut"
@mock.patch("fiona._env.GDALDataFinder.find_file")
def test_ensure_env_decorator_sets_gdal_data_prefix(
find_file, gdalenv, monkeypatch, tmpdir
):
"""fiona.env.ensure_env finds GDAL data under a prefix"""
@ensure_env
def f():
return getenv()["GDAL_DATA"]
find_file.return_value = None
tmpdir.ensure("share/gdal/header.dxf")
monkeypatch.delenv("GDAL_DATA", raising=False)
monkeypatch.setattr(_env, "__file__", str(tmpdir.join("fake.py")))
monkeypatch.setattr(sys, "prefix", str(tmpdir))
assert f() == str(tmpdir.join("share").join("gdal"))
@mock.patch("fiona._env.GDALDataFinder.find_file")
def test_ensure_env_decorator_sets_gdal_data_wheel(
find_file, gdalenv, monkeypatch, tmpdir
):
"""fiona.env.ensure_env finds GDAL data in a wheel"""
@ensure_env
def f():
return getenv()["GDAL_DATA"]
find_file.return_value = None
tmpdir.ensure("gdal_data/header.dxf")
monkeypatch.delenv("GDAL_DATA", raising=False)
monkeypatch.setattr(
_env, "__file__", str(tmpdir.join(os.path.basename(_env.__file__)))
)
assert f() == str(tmpdir.join("gdal_data"))
@mock.patch("fiona._env.GDALDataFinder.find_file")
def test_ensure_env_with_decorator_sets_gdal_data_wheel(
find_file, gdalenv, monkeypatch, tmpdir
):
"""fiona.env.ensure_env finds GDAL data in a wheel"""
@ensure_env_with_credentials
def f(*args):
return getenv()["GDAL_DATA"]
find_file.return_value = None
tmpdir.ensure("gdal_data/header.dxf")
monkeypatch.delenv("GDAL_DATA", raising=False)
monkeypatch.setattr(
_env, "__file__", str(tmpdir.join(os.path.basename(_env.__file__)))
)
assert f("foo") == str(tmpdir.join("gdal_data"))
def test_ensure_env_crs(path_coutwildrnp_shp):
"""Decoration of .crs works"""
assert fiona.open(path_coutwildrnp_shp).crs
def test_env_default_env(path_coutwildrnp_shp):
with fiona.open(path_coutwildrnp_shp):
assert hasenv()
def test_nested_gs_credentials(monkeypatch):
"""Check that rasterio.open() doesn't wipe out surrounding credentials"""
@ensure_env_with_credentials
def fake_opener(path):
return fiona.env.getenv()
with fiona.env.Env(session=GSSession(google_application_credentials="foo")):
assert fiona.env.getenv()["GOOGLE_APPLICATION_CREDENTIALS"] == "foo"
gdalenv = fake_opener("gs://foo/bar")
assert gdalenv["GOOGLE_APPLICATION_CREDENTIALS"] == "foo"
def METHOD_NAME(gdalenv):
"""Create an Env with a boto3 session."""
aws_session = boto3.Session(
aws_access_key_id="id",
aws_secret_access_key="key",
aws_session_token="token",
region_name="null-island-1",
)
with pytest.warns(FionaDeprecationWarning):
with fiona.env.Env(session=aws_session) as s:
assert (
s.session._session.get_credentials().get_frozen_credentials().access_key
== "id"
)
assert (
s.session._session.get_credentials().get_frozen_credentials().secret_key
== "key"
)
assert (
s.session._session.get_credentials().get_frozen_credentials().token
== "token"
)
assert s.session._session.region_name == "null-island-1" |
299,751 | update documentation version placeholders | from __future__ import print_function
import os
import sys
import re
import string
import glob
import fnmatch
from scriptCommon import catchPath
versionParser = re.compile( r'(\s*static\sVersion\sversion)\s*\(\s*(.*)\s*,\s*(.*)\s*,\s*(.*)\s*,\s*\"(.*)\"\s*,\s*(.*)\s*\).*' )
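# The regex above matches the version declaration in catch_version.cpp, which (mirroring the
# format string written back by updateVersionFile below) looks like:
#   static Version version( 3, 4, 0, "", 0 );
# where the numbers shown are illustrative.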
rootPath = os.path.join( catchPath, 'src/catch2' )
versionPath = os.path.join( rootPath, "catch_version.cpp" )
definePath = os.path.join(rootPath, 'catch_version_macros.hpp')
readmePath = os.path.join( catchPath, "README.md" )
cmakePath = os.path.join(catchPath, 'CMakeLists.txt')
mesonPath = os.path.join(catchPath, 'meson.build')
class Version:
def __init__(self):
f = open( versionPath, 'r' )
for line in f:
m = versionParser.match( line )
if m:
self.variableDecl = m.group(1)
self.majorVersion = int(m.group(2))
self.minorVersion = int(m.group(3))
self.patchNumber = int(m.group(4))
self.branchName = m.group(5)
self.buildNumber = int(m.group(6))
f.close()
def nonDevelopRelease(self):
if self.branchName != "":
self.branchName = ""
self.buildNumber = 0
def developBuild(self):
if self.branchName == "":
self.branchName = "develop"
self.buildNumber = 0
def incrementBuildNumber(self):
self.developBuild()
self.buildNumber = self.buildNumber+1
def incrementPatchNumber(self):
self.nonDevelopRelease()
self.patchNumber = self.patchNumber+1
def incrementMinorVersion(self):
self.nonDevelopRelease()
self.patchNumber = 0
self.minorVersion = self.minorVersion+1
def incrementMajorVersion(self):
self.nonDevelopRelease()
self.patchNumber = 0
self.minorVersion = 0
self.majorVersion = self.majorVersion+1
def getVersionString(self):
versionString = '{0}.{1}.{2}'.format( self.majorVersion, self.minorVersion, self.patchNumber )
if self.branchName != "":
versionString = versionString + '-{0}.{1}'.format( self.branchName, self.buildNumber )
return versionString
def updateVersionFile(self):
f = open( versionPath, 'r' )
lines = []
for line in f:
m = versionParser.match( line )
if m:
lines.append( '{0}( {1}, {2}, {3}, "{4}", {5} );'.format( self.variableDecl, self.majorVersion, self.minorVersion, self.patchNumber, self.branchName, self.buildNumber ) )
else:
lines.append( line.rstrip() )
f.close()
f = open( versionPath, 'w' )
for line in lines:
f.write( line + "\n" )
def updateCmakeFile(version):
with open(cmakePath, 'rb') as file:
lines = file.readlines()
replacementRegex = re.compile(b'''VERSION (\\d+.\\d+.\\d+) # CML version placeholder, don't delete''')
replacement = '''VERSION {0} # CML version placeholder, don't delete'''.format(version.getVersionString()).encode('ascii')
with open(cmakePath, 'wb') as file:
for line in lines:
file.write(replacementRegex.sub(replacement, line))
def updateMesonFile(version):
with open(mesonPath, 'rb') as file:
lines = file.readlines()
replacementRegex = re.compile(b'''version\\s*:\\s*'(\\d+.\\d+.\\d+)', # CML version placeholder, don't delete''')
replacement = '''version: '{0}', # CML version placeholder, don't delete'''.format(version.getVersionString()).encode('ascii')
with open(mesonPath, 'wb') as file:
for line in lines:
file.write(replacementRegex.sub(replacement, line))
def updateVersionDefine(version):
# First member of the tuple is the compiled regex object, the second is replacement if it matches
replacementRegexes = [(re.compile(b'#define CATCH_VERSION_MAJOR \\d+'),'#define CATCH_VERSION_MAJOR {}'.format(version.majorVersion).encode('ascii')),
(re.compile(b'#define CATCH_VERSION_MINOR \\d+'),'#define CATCH_VERSION_MINOR {}'.format(version.minorVersion).encode('ascii')),
(re.compile(b'#define CATCH_VERSION_PATCH \\d+'),'#define CATCH_VERSION_PATCH {}'.format(version.patchNumber).encode('ascii')),
]
with open(definePath, 'rb') as file:
lines = file.readlines()
with open(definePath, 'wb') as file:
for line in lines:
for replacement in replacementRegexes:
line = replacement[0].sub(replacement[1], line)
file.write(line)
def updateVersionPlaceholder(filename, version):
with open(filename, 'rb') as file:
lines = file.readlines()
placeholderRegex = re.compile(b'in Catch[0-9]? X.Y.Z')
replacement = 'in Catch2 {}.{}.{}'.format(version.majorVersion, version.minorVersion, version.patchNumber).encode('ascii')
with open(filename, 'wb') as file:
for line in lines:
file.write(placeholderRegex.sub(replacement, line))
def METHOD_NAME(version):
print('Updating version placeholder in documentation')
docsPath = os.path.join(catchPath, 'docs/')
for basePath, _, files in os.walk(docsPath):
for file in files:
if fnmatch.fnmatch(file, "*.md") and "contributing.md" != file:
updateVersionPlaceholder(os.path.join(basePath, file), version)
def performUpdates(version):
version.updateVersionFile()
updateVersionDefine(version)
import generateAmalgamatedFiles
generateAmalgamatedFiles.generate_header()
generateAmalgamatedFiles.generate_cpp()
updateCmakeFile(version)
updateMesonFile(version)
METHOD_NAME(version) |
299,752 | test file record parser | # Copyright 2007 by Tiago Antao <tiagoantao@gmail.com>. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Tests for PopGen GenePop nodepend module."""
import os
import unittest
from Bio.PopGen import GenePop
from Bio.PopGen.GenePop import FileParser
import tempfile
class RecordTest(unittest.TestCase):
"""Record tests."""
def test_record_basic(self):
"""Basic test on Record."""
r = GenePop.Record()
self.assertIsInstance(r.marker_len, int)
self.assertIsInstance(r.comment_line, str)
self.assertIsInstance(r.loci_list, list)
self.assertIsInstance(r.populations, list)
class ParserTest(unittest.TestCase):
"""Parser tests."""
def setUp(self):
files = [
"c2line.gen",
"c3line.gen",
"c2space.gen",
"c3space.gen",
"haplo3.gen",
"haplo2.gen",
]
self.handles = []
for filename in files:
self.handles.append(open(os.path.join("PopGen", filename)))
self.pops_indivs = [
(3, [4, 3, 5]),
(3, [4, 3, 5]),
(3, [4, 3, 5]),
(3, [4, 3, 5]),
(3, [4, 3, 5]),
(3, [4, 3, 5]),
]
self.num_loci = [3, 3, 3, 3, 3, 3]
self.marker_len = [2, 3, 2, 3, 3, 2]
self.pop_names = ["4", "b3", "5"]
def tearDown(self):
for handle in self.handles:
handle.close()
def test_record_parser(self):
"""Basic operation of the Record Parser."""
for index in range(len(self.handles)):
handle = self.handles[index]
rec = GenePop.read(handle)
self.assertTrue(
str(rec).startswith(
"Generated by createGenePop.py - (C) Tiago Antao\n"
"136255903\n"
"136257048\n"
"136257636\n"
"Pop\n"
),
f"Did not expect this:\n{rec}",
)
self.assertIsInstance(rec, GenePop.Record)
self.assertEqual(len(rec.loci_list), self.num_loci[index])
self.assertEqual(rec.marker_len, self.marker_len[index])
self.assertEqual(len(rec.populations), self.pops_indivs[index][0])
self.assertEqual(rec.pop_list, self.pop_names)
for i in range(self.pops_indivs[index][0]):
self.assertEqual(len(rec.populations[i]), self.pops_indivs[index][1][i])
def test_wrong_file_parser(self):
"""Testing the ability to deal with wrongly formatted files."""
with open(os.path.join("PopGen", "README")) as f:
self.assertRaises(ValueError, GenePop.read, f)
class FileParserTest(unittest.TestCase):
"""File parser tests."""
def setUp(self):
self.files = [
os.path.join("PopGen", x)
for x in [
"c2line.gen",
"c3line.gen",
"c2space.gen",
"c3space.gen",
"haplo3.gen",
"haplo2.gen",
]
]
self.pops_indivs = [
(3, [4, 3, 5]),
(3, [4, 3, 5]),
(3, [4, 3, 5]),
(3, [4, 3, 5]),
(3, [4, 3, 5]),
(3, [4, 3, 5]),
]
self.num_loci = [3, 3, 3, 3, 3, 3]
def METHOD_NAME(self):
"""Basic operation of the File Record Parser."""
for index in range(len(self.files)):
fname = self.files[index]
rec = FileParser.read(fname)
self.assertIsInstance(rec, FileParser.FileRecord)
self.assertTrue(
str(rec).startswith(
"Generated by createGenePop.py - (C) Tiago Antao\n"
"136255903\n"
"136257048\n"
"136257636\n"
"Pop\n"
),
f"Did not expect this:\n{rec}",
)
self.assertEqual(len(rec.loci_list), self.num_loci[index])
for skip in range(self.pops_indivs[index][0]):
self.assertIn(
rec.skip_population(), (True, None), msg="Not enough populations"
)
self.assertFalse(rec.skip_population(), msg="Too many populations")
for i in range(self.pops_indivs[index][0]):
continue
rec._handle.close() # TODO - Needs a proper fix
def test_wrong_file_parser(self):
"""Testing the ability to deal with wrongly formatted files."""
with open(os.path.join("PopGen", "README")) as f:
self.assertRaises(ValueError, GenePop.read, f)
def test_remove_features(self):
"""Testing the ability to remove population/loci via class methods."""
for index in range(len(self.files)):
fname = self.files[index]
ftemp = tempfile.NamedTemporaryFile(mode="w+", delete=False)
ftemp.close()
rec = FileParser.read(fname)
rec.remove_loci_by_position([0], ftemp.name)
with open(ftemp.name) as ft:
ft.seek(0)
rec2 = GenePop.read(iter(ft))
self.assertEqual(rec.loci_list[1:], rec2.loci_list)
rec.remove_locus_by_position(0, ftemp.name)
with open(ftemp.name) as ft:
ft.seek(0)
rec3 = GenePop.read(iter(ft))
self.assertEqual(rec.loci_list[1:], rec3.loci_list)
rec.remove_locus_by_name(rec.loci_list[0], ftemp.name)
with open(ftemp.name) as ft:
ft.seek(0)
rec4 = GenePop.read(iter(ft))
self.assertEqual(rec.loci_list[1:], rec4.loci_list)
rec.remove_loci_by_name([rec.loci_list[0]], ftemp.name)
with open(ftemp.name) as ft:
ft.seek(0)
rec5 = GenePop.read(iter(ft))
self.assertEqual(rec.loci_list[1:], rec5.loci_list)
os.remove(ftemp.name)
rec._handle.close()
class UtilsTest(unittest.TestCase):
"""Utils tests."""
def setUp(self):
# All files have to have at least 3 loci and 2 pops
files = ["c2line.gen"]
self.handles = []
for filename in files:
self.handles.append(open(os.path.join("PopGen", filename)))
def tearDown(self):
for handle in self.handles:
handle.close()
def test_utils(self):
"""Basic operation of GenePop Utils."""
for index in range(len(self.handles)):
handle = self.handles[index]
rec = GenePop.read(handle)
initial_pops = len(rec.populations)
initial_loci = len(rec.loci_list)
first_loci = rec.loci_list[0]
rec.remove_population(0)
self.assertEqual(len(rec.populations), initial_pops - 1)
rec.remove_locus_by_name(first_loci)
self.assertEqual(len(rec.loci_list), initial_loci - 1)
self.assertNotEqual(rec.loci_list[0], first_loci)
rec.remove_locus_by_position(0)
self.assertEqual(len(rec.loci_list), initial_loci - 2)
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
unittest.main(testRunner=runner) |
299,753 | url | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"networkcloud volume delete",
confirmation="Are you sure you want to perform this operation?",
)
class Delete(AAZCommand):
"""Delete the provided volume.
:example: Delete volume
az networkcloud volume delete --resource-group "resourceGroupName" --name "volumeName"
"""
_aaz_info = {
"version": "2023-07-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.networkcloud/volumes/{}", "2023-07-01"],
]
}
AZ_SUPPORT_NO_WAIT = True
def _handler(self, command_args):
super()._handler(command_args)
return self.build_lro_poller(self._execute_operations, None)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.volume_name = AAZStrArg(
options=["-n", "--name", "--volume-name"],
help="The name of the volume.",
required=True,
id_part="name",
fmt=AAZStrArgFormat(
pattern="^([a-zA-Z0-9][a-zA-Z0-9-_]{0,62}[a-zA-Z0-9])$",
),
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
yield self.VolumesDelete(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
class VolumesDelete(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [202]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [200]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [204]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_204,
self.on_error,
lro_options={"final-state-via": "location"},
path_format_arguments=self.url_parameters,
)
return self.on_error(session.http_response)
@property
def METHOD_NAME(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetworkCloud/volumes/{volumeName}",
**self.url_parameters
)
@property
def method(self):
return "DELETE"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
**self.serialize_url_param(
"volumeName", self.ctx.args.volume_name,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2023-07-01",
required=True,
),
}
return parameters
def on_200(self, session):
pass
def on_204(self, session):
pass
class _DeleteHelper:
"""Helper class for Delete"""
__all__ = ["Delete"] |
299,754 | landing url | """SonarQube violations collector."""
from shared_data_model import DATA_MODEL
from collector_utilities.type import URL
from model import Entities, Entity, SourceMeasurement, SourceResponses
from .base import SonarQubeCollector
class SonarQubeViolations(SonarQubeCollector):
"""SonarQube violations metric. Also base class for metrics that measure specific rules."""
rules_configuration = "" # Subclass responsibility
types_parameter = "types"
async def METHOD_NAME(self, responses: SourceResponses) -> URL:
"""Extend to add the issues path and parameters."""
url = await super().METHOD_NAME(responses)
component = self._parameter("component")
branch = self._parameter("branch")
landing_url = f"{url}/project/issues?id={component}&branch={branch}&resolved=false"
return URL(
landing_url
+ self._query_parameter("severities")
+ self._query_parameter(self.types_parameter)
+ self.__rules_url_parameter(),
)
async def _api_url(self) -> URL:
"""Extend to add the issue search path and parameters."""
url = await super()._api_url()
component = self._parameter("component")
branch = self._parameter("branch")
# If there's more than 500 issues only the first 500 are returned. This is no problem since we limit
# the number of "entities" sent to the server anyway (that limit is 100 currently).
api = f"{url}/api/issues/search?componentKeys={component}&branch={branch}&resolved=false&ps=500"
return URL(
api
+ self._query_parameter("severities")
+ self._query_parameter(self.types_parameter)
+ self.__rules_url_parameter(),
)
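# Illustrative result of the API URL built above (sketch; host, component and parameter values
# are placeholders):
#   https://sonar.example.org/api/issues/search?componentKeys=my-project&branch=main
#       &resolved=false&ps=500&severities=CRITICAL,MAJOR&types=BUG
# The severities/types/rules fragments are only appended when they differ from the defaults.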
def _query_parameter(self, parameter_key: str) -> str:
"""Return the multiple choice parameter as query parameter that can be passed to SonarQube."""
values = ",".join(value.upper() for value in sorted(self._parameter(parameter_key)))
return "" if values == self.__default_value(parameter_key) else f"&{parameter_key}={values}"
def __rules_url_parameter(self) -> str:
"""Return the rules url parameter, if any."""
rules = (
DATA_MODEL.sources[self.source_type].configuration[self.rules_configuration].value
if self.rules_configuration
else []
)
return f"&rules={','.join(rules)}" if rules else ""
async def _parse_source_responses(self, responses: SourceResponses) -> SourceMeasurement:
"""Override to parse the issues."""
value = 0
entities = Entities()
for response in responses:
json = await response.json()
value += int(json.get("total", 0))
entities.extend([await self._entity(issue) for issue in json.get("issues", [])])
return SourceMeasurement(value=str(value), entities=entities)
async def __issue_landing_url(self, issue_key: str) -> URL:
"""Generate a landing url for the issue."""
url = await super().METHOD_NAME(SourceResponses())
component = self._parameter("component")
branch = self._parameter("branch")
return URL(f"{url}/project/issues?id={component}&branch={branch}&issues={issue_key}&open={issue_key}")
async def _entity(self, issue) -> Entity:
"""Create an entity from an issue."""
return Entity(
key=issue["key"],
url=await self.__issue_landing_url(issue["key"]),
message=issue["message"],
severity=issue.get("severity", "no severity").lower(),
type=issue["type"].lower(),
component=issue["component"],
creation_date=issue["creationDate"],
update_date=issue["updateDate"],
)
def __default_value(self, parameter_key: str) -> str:
"""Return the default value for the parameter."""
defaults = DATA_MODEL.sources[self.source_type].parameters[parameter_key].values or []
return ",".join(value.upper() for value in sorted(defaults))
class SonarQubeViolationsWithPercentageScale(SonarQubeViolations):
"""SonarQube violations collectors that support the percentage scale."""
total_metric = "" # Subclass responsibility
async def _get_source_responses(self, *urls: URL) -> SourceResponses:
"""Extend to, next to the violations, get the total number of violations as basis for the percentage scale."""
component = self._parameter("component")
branch = self._parameter("branch")
base_api_url = await SonarQubeCollector._api_url(self) # noqa: SLF001
total_metric_api_url = URL(
f"{base_api_url}/api/measures/component?component={component}&branch={branch}"
f"&metricKeys={self.total_metric}",
)
return await super()._get_source_responses(*([*urls, total_metric_api_url]))
async def _parse_source_responses(self, responses: SourceResponses) -> SourceMeasurement:
"""Extend to parse the total number of violations."""
measurement = await super()._parse_source_responses(responses)
measures: list[dict[str, str]] = []
for response in responses:
measures.extend((await response.json()).get("component", {}).get("measures", []))
# Note that the SonarQube api sometimes omits values (when they are 0) from the component measurement endpoint
# This has not (yet) been observed for the 'functions' metric and current code would iterate over an empty list
measurement.total = str(sum(int(measure["value"]) for measure in measures))
return measurement |
299,755 | predict | # !/usr/bin/env python3 -u
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""Croston's Forecasting Method."""
import numpy as np
import pandas as pd
from sktime.forecasting.base import BaseForecaster
class Croston(BaseForecaster):
r"""Croston's method for forecasting intermittent time series.
Implements the method proposed by Croston in [1]_ and described in [2]_.
Croston's method is a modification of (vanilla) exponential smoothing to handle
intermittent time series. A time series is considered intermittent if many
of its values are zero and the gaps between non-zero entries are not periodic.
Croston's method will predict a constant value for all future times, so
Croston's method essentially provides another notion for the average value
of a time series.
The method is (equivalent to) the following:
- Let :math:`v_0,\ldots,v_n` be the non-zero values of the time series
- Let :math:`v` be the exponentially smoothed average of :math:`v_0,\ldots,v_n`
- Let :math:`z_0,\ldots,z_n` be the number of consecutive zeros plus 1 between
the :math:`v_i` in the original time series.
- Let :math:`z` be the exponentially smoothed average of :math:`z_0,\ldots,z_n`
- Then the forecast is :math:`\frac{v}{z}`
The intuition is that :math:`v` is a weighted average of the non-zero time
series values and :math:`\frac{1}{z}` estimates the probability of getting a
non-zero value.
Example to illustrate the :math:`v` and :math:`z` notation.
- If the original time series is :math:`0,0,2,7,0,0,0,-5` then:
- The :math:`v`'s are :math:`2,7,-5`
- The :math:`z`'s are :math:`3,1,4`
Parameters
----------
smoothing : float, default = 0.1
Smoothing parameter in exponential smoothing
Examples
--------
>>> from sktime.forecasting.croston import Croston
>>> from sktime.datasets import load_PBS_dataset
>>> y = load_PBS_dataset()
>>> forecaster = Croston(smoothing=0.1)
>>> forecaster.fit(y)
Croston(...)
>>> y_pred = forecaster.predict(fh=[1,2,3])
See Also
--------
ExponentialSmoothing
References
----------
.. [1] J. D. Croston. Forecasting and stock control for intermittent demands.
Operational Research Quarterly (1970-1977), 23(3):pp. 289–303, 1972.
.. [2] N. Vandeput. Forecasting Intermittent Demand with the Croston Model.
https://towardsdatascience.com/croston-forecast-model-for-intermittent-demand-360287a17f5f
"""
_tags = {
"requires-fh-in-fit": False, # is forecasting horizon already required in fit?
}
def __init__(self, smoothing=0.1):
# hyperparameter
self.smoothing = smoothing
self._f = None
super().__init__()
def _fit(self, y, X, fh):
"""Fit to training data.
Parameters
----------
y : pd.Series
Target time series to which to fit the forecaster.
fh : int, list or np.array, optional (default=None)
The forecaster's horizon with the steps ahead to predict.
X : pd.DataFrame, optional (default=None)
Exogenous variables are ignored.
Returns
-------
self : returns an instance of self.
"""
n_timepoints = len(y) # Historical period: i.e the input array's length
smoothing = self.smoothing
y = y.to_numpy() # Transform the input into a numpy array
# Fit the parameters: level(q), periodicity(a) and forecast(f)
q, a, f = np.full((3, n_timepoints + 1), np.nan)
p = 1 # periods since last demand observation
# Initialization:
first_occurrence = np.argmax(y[:n_timepoints] > 0)
q[0] = y[first_occurrence]
a[0] = 1 + first_occurrence
f[0] = q[0] / a[0]
# Create t+1 forecasts:
for t in range(0, n_timepoints):
if y[t] > 0:
q[t + 1] = smoothing * y[t] + (1 - smoothing) * q[t]
a[t + 1] = smoothing * p + (1 - smoothing) * a[t]
f[t + 1] = q[t + 1] / a[t + 1]
p = 1
else:
q[t + 1] = q[t]
a[t + 1] = a[t]
f[t + 1] = f[t]
p += 1
self._f = f
return self
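# The fit loop above implements Croston's recurrences with smoothing parameter alpha:
#   when y_t > 0:  q_{t+1} = alpha * y_t + (1 - alpha) * q_t    (smoothed demand size)
#                  a_{t+1} = alpha * p   + (1 - alpha) * a_t    (smoothed inter-demand interval)
#                  f_{t+1} = q_{t+1} / a_{t+1}                  (forecast)
#                  and the gap counter p resets to 1
#   when y_t == 0: q, a and f are carried forward unchanged and p is incremented.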
def METHOD_NAME(
self,
fh=None,
X=None,
):
"""Predict forecast.
Parameters
----------
fh : int, list or np.array, optional (default=None)
The forecaster's horizon with the steps ahead to predict.
X : pd.DataFrame, optional (default=None)
Exogenous variables are ignored.
Returns
-------
forecast : pd.series
Predicted forecasts.
"""
len_fh = len(self.fh)
f = self._f
# Predicting future forecasts:
y_pred = np.full(len_fh, f[-1])
index = self.fh.to_absolute_index(self.cutoff)
return pd.Series(y_pred, index=index, name=self._y.name)
@classmethod
def get_test_params(cls, parameter_set="default"):
"""Return testing parameter settings for the estimator.
Parameters
----------
parameter_set : str, default="default"
Name of the set of test parameters to return, for use in tests. If no
special parameters are defined for a value, will return `"default"` set.
Returns
-------
params : dict or list of dict
"""
params = [
{},
{"smoothing": 0},
{"smoothing": 0.42},
]
return params |
299,756 | configure arg parser by config | from __future__ import print_function
import argparse
import os
import sys
from contextlib import contextmanager
import colorama
from .. import conf, plugins
_PLUGIN_ACTIVATION_PREFIX = "--with-"
_PLUGIN_DEACTIVATION_PREFIX = "--without-"
def add_pending_plugins_from_commandline(argv):
returned_argv = []
for arg in argv:
if arg.startswith(_PLUGIN_DEACTIVATION_PREFIX):
plugin_name = arg[len(_PLUGIN_DEACTIVATION_PREFIX):]
plugins.manager.deactivate_later(plugin_name)
elif arg.startswith(_PLUGIN_ACTIVATION_PREFIX):
plugin_name = arg[len(_PLUGIN_ACTIVATION_PREFIX):]
plugins.manager.activate_later(plugin_name)
else:
returned_argv.append(arg)
return returned_argv
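# Example (sketch; the plugin names are hypothetical):
#   add_pending_plugins_from_commandline(["--with-coverage", "--without-xunit", "-v"])
# marks "coverage" for activation and "xunit" for deactivation via plugins.manager and
# returns ["-v"] for normal argument parsing.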
def configure_arg_parser_by_plugins(parser):
for plugin in plugins.manager.get_installed_plugins().values():
group = parser.add_argument_group('Options for {}{}'.format(
'' if plugins.manager.is_internal_plugin(plugin) else '--with-',
plugins.manager.normalize_command_line_name(plugin.get_name())))
plugin.configure_argument_parser(group)
def METHOD_NAME(parser, config=None):
if config is None:
config = conf.config
plugin_groups = {}
parser.add_argument(
"-o", dest="config_overrides", metavar="PATH=VALUE", action="append",
default=[],
help="Provide overrides for configuration"
)
for path, node, cmdline in _iter_cmdline_config(config):
if path.startswith('plugin_config.'):
plugin_name = path.split('.')[1]
subparser = plugin_groups.get(plugin_name)
if subparser is None:
subparser = plugin_groups[plugin_name] = parser.add_argument_group(
'options for the {0} plugin (--with-{0})'.format(plugin_name))
else:
subparser = parser
cmdline.configure_parser(subparser, path, node)
def configure_plugins_from_args(args):
for plugin in plugins.manager.get_active_plugins().values():
plugin.configure_from_parsed_args(args)
def _iter_cmdline_config(config):
for path, cfg in config.traverse_leaves():
cmdline = (cfg.metadata or {}).get("cmdline")
if cmdline is None:
continue
yield path, cfg, cmdline
@contextmanager
def get_modified_configuration_from_args_context(parser, args, config=None):
if config is None:
config = conf.config
to_restore = []
try:
for path, cfg, cmdline in _iter_cmdline_config(config):
old_value = cfg.get_value()
new_value = cmdline.update_value(old_value, args)
if new_value != old_value:
to_restore.append((path, cfg.get_value()))
try:
config.assign_path(path, new_value, deduce_type=True, default_type=str)
except ValueError:
parser.error('Invalid value for {}: {!r}'.format(cmdline, new_value))
for override in args.config_overrides:
if "=" not in override:
parser.error("Invalid config override: {}".format(override))
path, _ = override.split("=", 1)
to_restore.append((path, config.get_path(path)))
try:
config.assign_path_expression(override, deduce_type=True, default_type=str)
except ValueError:
parser.error("Invalid value for config override: {}".format(override))
yield
finally:
for path, prev_value in reversed(to_restore):
config.assign_path(path, prev_value)
class SlashArgumentParser(argparse.ArgumentParser):
def __init__(self, *args, **kwargs):
super(SlashArgumentParser, self).__init__(
prog=self._deduce_program_name(),
usage='{} [options]'.format(self._deduce_program_name()),
*args, **kwargs)
self._positionals_metavar = None
def _deduce_program_name(self):
returned = os.path.basename(sys.argv[0])
if len(sys.argv) > 1:
returned += " {}".format(sys.argv[1])
return returned
def set_positional_metavar(self, metavar, plural=True):
self._positionals_metavar = metavar
if plural:
self.usage += ' {0} [{0} [...]]'.format(metavar)
else:
self.usage += ' {}'.format(metavar)
def set_description(self, description):
self.description = description
COLOR_RESET = colorama.Fore.RESET + colorama.Back.RESET + colorama.Style.RESET_ALL # pylint: disable=no-member
class ColorizedString(str):
def __new__(cls, *args, **kwargs):
style = kwargs.pop('style', None)
returned = str.__new__(cls, *args, **kwargs)
returned.style = style
return returned
def colorize(self):
if self.style:
return '{}{}{}'.format(self.style, self, COLOR_RESET)
return str(self)
def make_styler(style):
return lambda string: ColorizedString(string, style=style)
UNDERLINED = '\x1b[4m'
class Printer(object):
    def __init__(self, report_stream, enable_output=True, force_color=False, enable_color=True, error_stream=None):
        self._stream = report_stream
self._error_stream = error_stream if error_stream is not None else self._stream
self._output_enabled = enable_output
self._force_color = force_color
self._color_enabled = enable_color
def _colored_print(self, *args, **print_kwargs):
self._print(*(getattr(arg, 'colorize', arg.__str__)() for arg in args), **print_kwargs)
def _print(self, *args, **print_kwargs):
is_error = print_kwargs.pop('error', False)
print_kwargs['file'] = self._error_stream if is_error else self._stream
print(*args, **print_kwargs)
def __call__(self, *args, **print_kwargs):
if self._output_enabled:
should_color = self._force_color or (self._color_enabled and self._stream.isatty())
print_func = self._colored_print if should_color else self._print
print_func(*args, **print_kwargs)
def error_abort(message, *args, **kwargs):
stream = kwargs.pop('stream', sys.stderr)
assert not kwargs
if args:
message = message.format(*args)
print(message, file=stream)
sys.exit(-1) |
299,757 | test update linear kwargs | __author__ = "sibirrer"
import numpy as np
from lenstronomy.ImSim.image_linear_solve import ImageLinearFit
import lenstronomy.Util.param_util as param_util
from lenstronomy.LensModel.lens_model import LensModel
from lenstronomy.LightModel.light_model import LightModel
from lenstronomy.PointSource.point_source import PointSource
import lenstronomy.Util.simulation_util as sim_util
from lenstronomy.Data.imaging_data import ImageData
from lenstronomy.Data.psf import PSF
import numpy.testing as npt
class TestImageLinearFit(object):
def setup_method(self):
sigma_bkg = 0.05 # background noise per pixel
exp_time = 100 # exposure time (arbitrary units, flux per pixel is in units #photons/exp_time unit)
numPix = 100 # cutout pixel size
deltaPix = 0.05 # pixel size in arcsec (area per pixel = deltaPix**2)
fwhm = 0.5 # full width half max of PSF
# PSF specification
kwargs_data = sim_util.data_configure_simple(
numPix, deltaPix, exp_time, sigma_bkg, inverse=True
)
data_class = ImageData(**kwargs_data)
kwargs_psf = {
"psf_type": "GAUSSIAN",
"fwhm": fwhm,
"truncation": 5,
"pixel_size": deltaPix,
}
psf_class = PSF(**kwargs_psf)
kwargs_sis = {"theta_E": 1.0, "center_x": 0, "center_y": 0}
lens_model_list = ["SIS"]
self.kwargs_lens = [kwargs_sis]
lens_model_class = LensModel(lens_model_list=lens_model_list)
kwargs_sersic = {
"amp": 1.0,
"R_sersic": 0.1,
"n_sersic": 2,
"center_x": 0,
"center_y": 0,
}
# 'SERSIC_ELLIPSE': elliptical Sersic profile
phi, q = 0.2, 0.9
e1, e2 = param_util.phi_q2_ellipticity(phi, q)
kwargs_sersic_ellipse = {
"amp": 1.0,
"R_sersic": 0.6,
"n_sersic": 7,
"center_x": 0,
"center_y": 0,
"e1": e1,
"e2": e2,
}
lens_light_model_list = ["SERSIC"]
self.kwargs_lens_light = [kwargs_sersic]
lens_light_model_class = LightModel(light_model_list=lens_light_model_list)
source_model_list = ["SERSIC_ELLIPSE"]
self.kwargs_source = [kwargs_sersic_ellipse]
source_model_class = LightModel(light_model_list=source_model_list)
self.kwargs_ps = [
{"ra_source": 0.01, "dec_source": 0.0, "source_amp": 1.0}
] # quasar point source position in the source plane and intrinsic brightness
point_source_class = PointSource(
point_source_type_list=["SOURCE_POSITION"], fixed_magnification_list=[True]
)
kwargs_numerics = {
"supersampling_factor": 2,
"supersampling_convolution": False,
}
self.imageModel = ImageLinearFit(
data_class,
psf_class,
lens_model_class,
source_model_class,
lens_light_model_class,
point_source_class,
kwargs_numerics=kwargs_numerics,
)
def test_linear_param_from_kwargs(self):
param = self.imageModel.linear_param_from_kwargs(
self.kwargs_source, self.kwargs_lens_light, self.kwargs_ps
)
assert param[0] == self.kwargs_source[0]["amp"]
assert param[1] == self.kwargs_lens_light[0]["amp"]
assert param[2] == self.kwargs_ps[0]["source_amp"]
def METHOD_NAME(self):
num = self.imageModel.num_param_linear(
self.kwargs_lens, self.kwargs_source, self.kwargs_lens_light, self.kwargs_ps
)
param = np.ones(num) * 10
(
kwargs_lens,
kwargs_source,
kwargs_lens_light,
kwargs_ps,
) = self.imageModel.update_linear_kwargs(
param,
kwargs_lens=self.kwargs_lens,
kwargs_source=self.kwargs_source,
kwargs_lens_light=self.kwargs_lens_light,
kwargs_ps=self.kwargs_ps,
)
assert kwargs_source[0]["amp"] == 10
def test_error_response(self):
C_D_response, model_error = self.imageModel.error_response(
kwargs_lens=self.kwargs_lens, kwargs_ps=self.kwargs_ps, kwargs_special=None
)
npt.assert_almost_equal(model_error, 0) |
299,758 | make request and skip if | from DocXMLRPCServer import DocXMLRPCServer
import httplib
import sys
from test import test_support
threading = test_support.import_module('threading')
import time
import socket
import unittest
PORT = None
def METHOD_NAME(condition, reason):
    # If we skip the test, we have to make a request because the
    # server created in setUp blocks expecting one to come in.
if not condition:
return lambda func: func
def decorator(func):
def make_request_and_skip(self):
self.client.request("GET", "/")
self.client.getresponse()
raise unittest.SkipTest(reason)
return make_request_and_skip
return decorator
def server(evt, numrequests):
serv = DocXMLRPCServer(("localhost", 0), logRequests=False)
try:
global PORT
PORT = serv.socket.getsockname()[1]
# Add some documentation
serv.set_server_title("DocXMLRPCServer Test Documentation")
serv.set_server_name("DocXMLRPCServer Test Docs")
serv.set_server_documentation(
"This is an XML-RPC server's documentation, but the server "
"can be used by POSTing to /RPC2. Try self.add, too.")
# Create and register classes and functions
class TestClass(object):
def test_method(self, arg):
"""Test method's docs. This method truly does very little."""
self.arg = arg
serv.register_introspection_functions()
serv.register_instance(TestClass())
def add(x, y):
"""Add two instances together. This follows PEP008, but has nothing
to do with RFC1952. Case should matter: pEp008 and rFC1952. Things
that start with http and ftp should be auto-linked, too:
http://google.com.
"""
return x + y
serv.register_function(add)
serv.register_function(lambda x, y: x-y)
while numrequests > 0:
serv.handle_request()
numrequests -= 1
except socket.timeout:
pass
finally:
serv.server_close()
PORT = None
evt.set()
class DocXMLRPCHTTPGETServer(unittest.TestCase):
def setUp(self):
self._threads = test_support.threading_setup()
# Enable server feedback
DocXMLRPCServer._send_traceback_header = True
self.evt = threading.Event()
threading.Thread(target=server, args=(self.evt, 1)).start()
# wait for port to be assigned
n = 1000
while n > 0 and PORT is None:
time.sleep(0.001)
n -= 1
self.client = httplib.HTTPConnection("localhost:%d" % PORT)
def tearDown(self):
self.client.close()
self.evt.wait()
# Disable server feedback
DocXMLRPCServer._send_traceback_header = False
test_support.threading_cleanup(*self._threads)
def test_valid_get_response(self):
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader("Content-type"), "text/html")
# Server raises an exception if we don't start to read the data
response.read()
def test_invalid_get_response(self):
self.client.request("GET", "/spam")
response = self.client.getresponse()
self.assertEqual(response.status, 404)
self.assertEqual(response.getheader("Content-type"), "text/plain")
response.read()
def test_lambda(self):
"""Test that lambda functionality stays the same. The output produced
        currently is, I suspect, invalid because of the unencoded brackets in the
HTML, "<lambda>".
The subtraction lambda method is tested.
"""
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertIn('<dl><dt><a name="-<lambda>"><strong>'
'<lambda></strong></a>(x, y)</dt></dl>',
response.read())
@METHOD_NAME(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_autolinking(self):
"""Test that the server correctly automatically wraps references to
PEPS and RFCs with links, and that it linkifies text starting with
http or ftp protocol prefixes.
The documentation for the "add" method contains the test material.
"""
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertIn(
('<dl><dt><a name="-add"><strong>add</strong></a>(x, y)</dt><dd>'
'<tt>Add two instances together. This '
'follows <a href="http://www.python.org/dev/peps/pep-0008/">'
'PEP008</a>, but has nothing<br>\nto do '
'with <a href="http://www.rfc-editor.org/rfc/rfc1952.txt">'
'RFC1952</a>. Case should matter: pEp008 '
'and rFC1952. Things<br>\nthat start '
'with http and ftp should be '
'auto-linked, too:<br>\n<a href="http://google.com">'
'http://google.com</a>.</tt></dd></dl>'), response.read())
@METHOD_NAME(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
    def test_system_methods(self):
        """Test the presence of three consecutive system.* methods.
This also tests their use of parameter type recognition and the
systems related to that process.
"""
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertIn(
('<dl><dt><a name="-system.listMethods"><strong>system.listMethods'
'</strong></a>()</dt><dd><tt><a href="#-system.listMethods">system'
'.listMethods</a>() => [\'add\', \'subtract\','
' \'multiple\']<br>\n <br>\nReturns a list'
' of the methods supported by the'
' server.</tt></dd></dl>\n <dl><dt><a name="-system.methodHelp">'
'<strong>system.methodHelp</strong></a>(method_name)</dt><dd><tt>'
'<a href="#-system.methodHelp">system.methodHelp</a>(\'add\') '
'=> "Adds two integers together"<br>\n '
'<br>\nReturns a string containing documentation'
' for the specified method.</tt></dd></dl>\n '
'<dl><dt><a name="-system.methodSignature"><strong>system.'
'methodSignature</strong></a>(method_name)</dt><dd><tt><a href="#-'
'system.methodSignature">system.methodSignature</a>(\'add\') '
'=> [double, int, int]<br>\n <br>\nReturns'
' a list describing the signature of'
' the method. In the<br>\nabove example,'
' the add method takes two integers'
' as arguments<br>\nand returns a double'
' result.<br>\n <br>\nThis server does '
'NOT support system.methodSignature.</tt></dd></dl>'),
response.read())
def test_autolink_dotted_methods(self):
"""Test that selfdot values are made strong automatically in the
documentation."""
self.client.request("GET", "/")
response = self.client.getresponse()
self.assertIn("""Try self.<strong>add</strong>, too.""",
response.read())
def test_main():
test_support.run_unittest(DocXMLRPCHTTPGETServer)
if __name__ == '__main__':
test_main() |
299,759 | main | ################################################################################
# Copyright (C) 2023 Maxim Integrated Products, Inc., All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL MAXIM INTEGRATED BE LIABLE FOR ANY CLAIM, DAMAGES
# OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# Except as contained in this notice, the name of Maxim Integrated
# Products, Inc. shall not be used except as stated in the Maxim Integrated
# Products, Inc. Branding Policy.
#
# The mere transfer of this software does not imply any licenses
# of trade secrets, proprietary technology, copyrights, patents,
# trademarks, maskwork rights, or any other form of intellectual
# property whatsoever. Maxim Integrated Products, Inc. retains all
# ownership rights.
#
###############################################################################
"""
Script to generate Face Id embeddings
"""
import argparse
import os.path as path
import numpy as np
import torch
from mtcnn.mtcnn import MTCNN
from ai85.ai85_adapter import AI85SimulatorAdapter
from utils import append_db_file_from_path, save_embedding_db, create_embeddings_include_file
CURRENT_DIR = path.abspath(path.dirname(path.abspath(__file__)))
MODEL_PATH = path.join(CURRENT_DIR, 'model', 'ai85-faceid-qat8-q.pth.tar')
def create_db_from_folder(args):
"""
    Creates the face detector and the AI85 simulator, then calls the utility
    functions to generate embeddings and store them in the required format.
"""
face_detector = MTCNN(image_size=80, margin=0, min_face_size=60, thresholds=[0.6, 0.8, 0.92],
factor=0.85, post_process=True, device='cpu')
ai85_adapter = AI85SimulatorAdapter(MODEL_PATH)
embedding_db, _ = append_db_file_from_path(args.db, face_detector, ai85_adapter,
db_dict=None, verbose=True)
if not embedding_db:
        print('Cannot create a DB file. No face could be detected from the images in folder '
              f'`{args.db}`')
return
save_embedding_db(embedding_db, path.join(CURRENT_DIR, args.db_filename + '.bin'),
add_prev_imgs=True)
create_embeddings_include_file(CURRENT_DIR, args.db_filename, args.include_path)
def parse_arguments():
"""
    Function to parse command line arguments.
"""
parser = argparse.ArgumentParser(description='Create embedding database file.')
parser.add_argument('--db', '-db-path', type=str, default='db',
help='path for face images')
parser.add_argument('--db-filename', type=str, default='embeddings',
help='filename to store embeddings')
parser.add_argument('--include-path', type=str, default='embeddings',
help='path to include folder')
args = parser.parse_args()
return args
def METHOD_NAME():
"""
Entry point of the script to parse command line arguments and run the function to generate
embeddings.
"""
# make deterministic
torch.manual_seed(0)
np.random.seed(0)
args = parse_arguments()
create_db_from_folder(args)
if __name__ == "__main__":
METHOD_NAME() |
299,760 | get charset | from collections.abc import Generator, Iterator, Sequence
from email import _ParamsType, _ParamType
from email.charset import Charset
from email.contentmanager import ContentManager
from email.errors import MessageDefect
from email.policy import Policy
from typing import Any, TypeVar, overload
from typing_extensions import Self, TypeAlias
__all__ = ["Message", "EmailMessage"]
_T = TypeVar("_T")
_PayloadType: TypeAlias = list[Message] | str | bytes | bytearray
_CharsetType: TypeAlias = Charset | str | None
_HeaderType: TypeAlias = Any
class Message:
policy: Policy # undocumented
preamble: str | None
epilogue: str | None
defects: list[MessageDefect]
def is_multipart(self) -> bool: ...
def set_unixfrom(self, unixfrom: str) -> None: ...
def get_unixfrom(self) -> str | None: ...
def attach(self, payload: Message) -> None: ...
def get_payload(self, i: int | None = None, decode: bool = False) -> Any: ... # returns _PayloadType | None
def set_payload(self, payload: _PayloadType, charset: _CharsetType = None) -> None: ...
def set_charset(self, charset: _CharsetType) -> None: ...
def METHOD_NAME(self) -> _CharsetType: ...
def __len__(self) -> int: ...
def __contains__(self, name: str) -> bool: ...
def __iter__(self) -> Iterator[str]: ...
def __getitem__(self, name: str) -> _HeaderType: ...
def __setitem__(self, name: str, val: _HeaderType) -> None: ...
def __delitem__(self, name: str) -> None: ...
def keys(self) -> list[str]: ...
def values(self) -> list[_HeaderType]: ...
def items(self) -> list[tuple[str, _HeaderType]]: ...
@overload
def get(self, name: str, failobj: None = None) -> _HeaderType | None: ...
@overload
def get(self, name: str, failobj: _T) -> _HeaderType | _T: ...
@overload
def get_all(self, name: str, failobj: None = None) -> list[_HeaderType] | None: ...
@overload
def get_all(self, name: str, failobj: _T) -> list[_HeaderType] | _T: ...
def add_header(self, _name: str, _value: str, **_params: _ParamsType) -> None: ...
def replace_header(self, _name: str, _value: _HeaderType) -> None: ...
def get_content_type(self) -> str: ...
def get_content_maintype(self) -> str: ...
def get_content_subtype(self) -> str: ...
def get_default_type(self) -> str: ...
def set_default_type(self, ctype: str) -> None: ...
@overload
def get_params(
self, failobj: None = None, header: str = "content-type", unquote: bool = True
) -> list[tuple[str, str]] | None: ...
@overload
def get_params(self, failobj: _T, header: str = "content-type", unquote: bool = True) -> list[tuple[str, str]] | _T: ...
@overload
def get_param(
self, param: str, failobj: None = None, header: str = "content-type", unquote: bool = True
) -> _ParamType | None: ...
@overload
def get_param(self, param: str, failobj: _T, header: str = "content-type", unquote: bool = True) -> _ParamType | _T: ...
def del_param(self, param: str, header: str = "content-type", requote: bool = True) -> None: ...
def set_type(self, type: str, header: str = "Content-Type", requote: bool = True) -> None: ...
@overload
def get_filename(self, failobj: None = None) -> str | None: ...
@overload
def get_filename(self, failobj: _T) -> str | _T: ...
@overload
def get_boundary(self, failobj: None = None) -> str | None: ...
@overload
def get_boundary(self, failobj: _T) -> str | _T: ...
def set_boundary(self, boundary: str) -> None: ...
@overload
def get_content_charset(self) -> str | None: ...
@overload
def get_content_charset(self, failobj: _T) -> str | _T: ...
@overload
def get_charsets(self, failobj: None = None) -> list[str | None]: ...
@overload
def get_charsets(self, failobj: _T) -> list[str | _T]: ...
def walk(self) -> Generator[Self, None, None]: ...
def get_content_disposition(self) -> str | None: ...
def as_string(self, unixfrom: bool = False, maxheaderlen: int = 0, policy: Policy | None = None) -> str: ...
def as_bytes(self, unixfrom: bool = False, policy: Policy | None = None) -> bytes: ...
def __bytes__(self) -> bytes: ...
def set_param(
self,
param: str,
value: str,
header: str = "Content-Type",
requote: bool = True,
charset: str | None = None,
language: str = "",
replace: bool = False,
) -> None: ...
def __init__(self, policy: Policy = ...) -> None: ...
# The following two methods are undocumented, but a source code comment states that they are public API
def set_raw(self, name: str, value: _HeaderType) -> None: ...
def raw_items(self) -> Iterator[tuple[str, _HeaderType]]: ...
class MIMEPart(Message):
def __init__(self, policy: Policy | None = None) -> None: ...
def get_body(self, preferencelist: Sequence[str] = ("related", "html", "plain")) -> Message | None: ...
def iter_attachments(self) -> Iterator[Message]: ...
def iter_parts(self) -> Iterator[Message]: ...
def get_content(self, *args: Any, content_manager: ContentManager | None = None, **kw: Any) -> Any: ...
def set_content(self, *args: Any, content_manager: ContentManager | None = None, **kw: Any) -> None: ...
def make_related(self, boundary: str | None = None) -> None: ...
def make_alternative(self, boundary: str | None = None) -> None: ...
def make_mixed(self, boundary: str | None = None) -> None: ...
def add_related(self, *args: Any, content_manager: ContentManager | None = ..., **kw: Any) -> None: ...
def add_alternative(self, *args: Any, content_manager: ContentManager | None = ..., **kw: Any) -> None: ...
def add_attachment(self, *args: Any, content_manager: ContentManager | None = ..., **kw: Any) -> None: ...
def clear(self) -> None: ...
def clear_content(self) -> None: ...
def as_string(self, unixfrom: bool = False, maxheaderlen: int | None = None, policy: Policy | None = None) -> str: ...
def is_attachment(self) -> bool: ...
class EmailMessage(MIMEPart): ... |
299,761 | test graph constant | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import unittest
from collections import OrderedDict
import numpy as np
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
from oneflow.test_utils.test_util import GenArgDict
def _test_global_new_full(test_case, shape, full_value, placement, sbp):
np_res = np.full(shape, full_value)
x = flow.ones(shape)
y = x.new_full(shape, full_value, placement=placement, sbp=sbp)
test_case.assertEqual(y.shape, flow.Size(shape))
test_case.assertEqual(y.sbp, sbp)
test_case.assertEqual(y.placement, placement)
y = y.to_global(
placement=placement,
sbp=[flow.sbp.broadcast for _ in range(len(placement.ranks.shape))],
).to_local()
test_case.assertTrue(np.array_equal(y.numpy(), np_res))
def _test_global_graph_new_full(test_case, shape, full_value, placement, sbp):
np_res = np.full(shape, full_value)
class GlobalNewFullGraph(flow.nn.Graph):
def __init__(self,):
super().__init__()
def build(self,):
x = flow.ones(shape)
y = x.new_full(shape, full_value, placement=placement, sbp=sbp)
return y
model = GlobalNewFullGraph()
y = model()
test_case.assertEqual(y.shape, flow.Size(shape))
test_case.assertEqual(y.sbp, sbp)
test_case.assertEqual(y.placement, placement)
y = y.to_global(
placement=placement,
sbp=[flow.sbp.broadcast for _ in range(len(placement.ranks.shape))],
).to_local()
test_case.assertTrue(np.array_equal(y.numpy(), np_res))
def _test_global_constant(test_case, func, shape, placement, sbp):
func2 = None
if func == "ones":
func = flow.ones
np_res = np.ones(shape)
elif func == "zeros":
func = flow.zeros
np_res = np.zeros(shape)
elif func == "new_zeros":
func = flow.zeros
np_res = np.zeros(shape)
func2 = flow.new_zeros
else:
raise NotImplementedError
x = func(*shape, placement=placement, sbp=sbp)
if func2:
x = func2(x)
test_case.assertEqual(x.shape, flow.Size(shape))
test_case.assertEqual(x.sbp, sbp)
test_case.assertEqual(x.placement, placement)
x = x.to_global(
placement=placement,
sbp=[flow.sbp.broadcast for _ in range(len(placement.ranks.shape))],
).to_local()
test_case.assertTrue(np.array_equal(x.numpy(), np_res))
def METHOD_NAME(test_case, func, shape, placement, sbp):
func2 = None
if func == "ones":
func = flow.ones
np_res = np.ones(shape)
elif func == "zeros":
func = flow.zeros
np_res = np.zeros(shape)
elif func == "new_zeros":
func = flow.zeros
np_res = np.zeros(shape)
func2 = flow.new_zeros
else:
raise NotImplementedError
class GlobalConstantGraph(flow.nn.Graph):
def __init__(self,):
super().__init__()
def build(self):
x = func(*shape, placement=placement, sbp=sbp)
if func2:
x = func2(x)
return x
model = GlobalConstantGraph()
x = model()
test_case.assertEqual(x.shape, flow.Size(shape))
test_case.assertEqual(x.sbp, sbp)
test_case.assertEqual(x.placement, placement)
x = x.to_global(
placement=placement,
sbp=[flow.sbp.broadcast for _ in range(len(placement.ranks.shape))],
).to_local()
test_case.assertTrue(np.array_equal(x.numpy(), np_res))
class TestConstantGlobal(flow.unittest.TestCase):
@globaltest
def test_constant_global(test_case):
shapes = [(8,), (8, 8,), (8, 8, 8)]
functions = [
"ones",
"zeros",
"new_zeros",
]
for func in functions:
for shape in shapes:
for placement in all_placement():
for sbp in all_sbp(
placement, max_dim=len(shape), except_partial_sum=True
):
_test_global_constant(test_case, func, shape, placement, sbp)
full_values = [2, 3, 4]
for full_value in full_values:
for shape in shapes:
for placement in all_placement():
for sbp in all_sbp(placement, max_dim=len(shape),):
_test_global_new_full(
test_case, shape, full_value, placement, sbp
)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
@flow.unittest.skip_unless_1n2d()
def test_constant_graph(test_case):
arg_dict = OrderedDict()
arg_dict["func"] = ["ones", "zeros", "new_zeros"]
arg_dict["shape"] = [(8,), (8, 8,), (8, 8, 8)]
arg_dict["placement"] = [
# 1d
flow.placement("cpu", ranks=[0, 1]),
flow.placement("cuda", ranks=[0, 1]),
# 2d
flow.placement("cpu", ranks=[[0, 1],]),
flow.placement("cuda", ranks=[[0, 1],]),
]
for args in GenArgDict(arg_dict):
func = args["func"]
shape = args["shape"]
placement = args["placement"]
for sbp in all_sbp(placement, max_dim=len(shape), except_partial_sum=True):
METHOD_NAME(test_case, func, shape, placement, sbp)
full_values = [2, 3, 4]
shapes = [(8,), (8, 8,), (8, 8, 8)]
for full_value in full_values:
for shape in shapes:
for placement in all_placement():
for sbp in all_sbp(placement, max_dim=len(shape)):
_test_global_graph_new_full(
test_case, shape, full_value, placement, sbp
)
if __name__ == "__main__":
unittest.main() |
299,762 | list | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class Operations(object):
"""Operations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.alertsmanagement.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def METHOD_NAME(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["models.OperationsList"]
"""List all operations available through Azure Alerts Management Resource Provider.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationsList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.alertsmanagement.models.OperationsList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.OperationsList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-05-05-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.METHOD_NAME.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('OperationsList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
METHOD_NAME.metadata = {'url': '/providers/Microsoft.AlertsManagement/operations'} # type: ignore |
299,763 | test can execute synchronous | # Copyright 2023 Avaiga Private Limited
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import multiprocessing
from concurrent.futures import ProcessPoolExecutor
from datetime import datetime
from functools import partial
from time import sleep
from unittest import mock
from unittest.mock import MagicMock
from pytest import raises
from src.taipy.core import DataNodeId, JobId, TaskId
from src.taipy.core._orchestrator._dispatcher._development_job_dispatcher import _DevelopmentJobDispatcher
from src.taipy.core._orchestrator._dispatcher._standalone_job_dispatcher import _StandaloneJobDispatcher
from src.taipy.core._orchestrator._orchestrator_factory import _OrchestratorFactory
from src.taipy.core.config.job_config import JobConfig
from src.taipy.core.data._data_manager import _DataManager
from src.taipy.core.job.job import Job
from src.taipy.core.task.task import Task
from taipy.config.config import Config
def execute(lock):
with lock:
...
return None
def _error():
raise RuntimeError("Something bad has happened")
def test_build_development_job_dispatcher():
Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE)
_OrchestratorFactory._build_dispatcher()
dispatcher = _OrchestratorFactory._dispatcher
assert isinstance(dispatcher, _DevelopmentJobDispatcher)
assert dispatcher._nb_available_workers == 1
with raises(NotImplementedError):
assert dispatcher.start()
assert dispatcher.is_running()
with raises(NotImplementedError):
dispatcher.stop()
def test_build_standalone_job_dispatcher():
Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
_OrchestratorFactory._build_dispatcher()
dispatcher = _OrchestratorFactory._dispatcher
assert not isinstance(dispatcher, _DevelopmentJobDispatcher)
assert isinstance(dispatcher, _StandaloneJobDispatcher)
assert isinstance(dispatcher._executor, ProcessPoolExecutor)
assert dispatcher._nb_available_workers == 2
assert_true_after_120_second_max(dispatcher.is_running)
dispatcher.stop()
dispatcher.join()
assert_true_after_120_second_max(lambda: not dispatcher.is_running())
def test_can_execute_2_workers():
Config.configure_job_executions(mode=JobConfig._STANDALONE_MODE, max_nb_of_workers=2)
m = multiprocessing.Manager()
lock = m.Lock()
task_id = TaskId("task_id1")
output = list(_DataManager._bulk_get_or_create([Config.configure_data_node("input1", default_data=21)]).values())
_OrchestratorFactory._build_dispatcher()
task = Task(
config_id="name",
properties={},
input=[],
function=partial(execute, lock),
output=output,
id=task_id,
)
job_id = JobId("id1")
job = Job(job_id, task, "submit_id", task.id)
dispatcher = _StandaloneJobDispatcher(_OrchestratorFactory._orchestrator)
with lock:
assert dispatcher._can_execute()
dispatcher._dispatch(job)
assert dispatcher._can_execute()
dispatcher._dispatch(job)
assert not dispatcher._can_execute()
assert_true_after_120_second_max(lambda: dispatcher._can_execute())
def METHOD_NAME():
Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE)
_OrchestratorFactory._build_dispatcher()
task_id = TaskId("task_id1")
task = Task(config_id="name", properties={}, input=[], function=print, output=[], id=task_id)
job_id = JobId("id1")
job = Job(job_id, task, "submit_id", task.id)
dispatcher = _OrchestratorFactory._dispatcher
assert dispatcher._can_execute()
dispatcher._dispatch(job)
assert dispatcher._can_execute()
def test_exception_in_user_function():
Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE)
_OrchestratorFactory._build_dispatcher()
task_id = TaskId("task_id1")
job_id = JobId("id1")
task = Task(config_id="name", properties={}, input=[], function=_error, output=[], id=task_id)
job = Job(job_id, task, "submit_id", task.id)
dispatcher = _OrchestratorFactory._dispatcher
dispatcher._dispatch(job)
assert job.is_failed()
assert 'RuntimeError("Something bad has happened")' in str(job.stacktrace[0])
def test_exception_in_writing_data():
Config.configure_job_executions(mode=JobConfig._DEVELOPMENT_MODE)
_OrchestratorFactory._build_dispatcher()
task_id = TaskId("task_id1")
job_id = JobId("id1")
output = MagicMock()
output.id = DataNodeId("output_id")
output.config_id = "my_raising_datanode"
output._is_in_cache = False
output.write.side_effect = ValueError()
task = Task(config_id="name", properties={}, input=[], function=print, output=[output], id=task_id)
job = Job(job_id, task, "submit_id", task.id)
dispatcher = _OrchestratorFactory._dispatcher
with mock.patch("src.taipy.core.data._data_manager._DataManager._get") as get:
get.return_value = output
dispatcher._dispatch(job)
assert job.is_failed()
assert "node" in job.stacktrace[0]
def assert_true_after_120_second_max(assertion):
start = datetime.now()
while (datetime.now() - start).seconds < 120:
sleep(0.1) # Limit CPU usage
if assertion():
return
assert assertion() |
299,764 | test exit status multiple studies | #!/usr/bin/env python3
"""
Copyright (c) 2018 The Hyve B.V.
This code is licensed under the GNU Affero General Public License (AGPL),
version 3, or (at your option) any later version.
"""
import unittest
import sys
import os
import glob
from contextlib import contextmanager
from io import StringIO
import logging.handlers
import tempfile
import shutil
from importer import validateStudies, cbioportal_common
# globals:
PORTAL_INFO_DIR = 'test_data/api_json_system_tests'
# FIXME: replace by contextlib.redirect_stdout when moving to Python 3.4+
@contextmanager
def redirect_stdout(new_target):
"""Temporarily re-bind sys.stdout to a different file-like object."""
old_target = sys.stdout
sys.stdout = new_target
try:
yield
finally:
sys.stdout = old_target
# FIXME: replace by tempfile.TemporaryDirectory when moving to Python 3.2+
@contextmanager
def TemporaryDirectory():
"""Create a temporary directory and remove it after use."""
path = tempfile.mkdtemp()
try:
yield path
finally:
shutil.rmtree(path)
class ValidateStudiesSystemTester(unittest.TestCase):
"""Test cases around running the validateStudies script
(such as "does it return the correct exit status?")
"""
def test_exit_status_success(self):
"""study 0 : no errors, expected exit_status = 0.
Possible exit statuses:
0: 'VALID',
1: 'INVALID'
"""
# Build up arguments and run
print("===study 0")
args = ['--list-of-studies', 'test_data/study_es_0/',
'--portal_info_dir', PORTAL_INFO_DIR]
args = validateStudies.interface(args)
exit_status = validateStudies.main(args)
self.assertEqual(0, exit_status)
def test_exit_status_success_for_legacy_cna_discrete(self):
"""study 0 : no errors, expected exit_status = 0.
Possible exit statuses:
0: 'VALID',
1: 'INVALID'
"""
# Build up arguments and run
print("===study 0 legacy")
args = ['--list-of-studies', 'test_data/study_es_0_legacy_cna_discrete/',
'--portal_info_dir', PORTAL_INFO_DIR]
args = validateStudies.interface(args)
exit_status = validateStudies.main(args)
self.assertEqual(0, exit_status)
def test_exit_status_failure(self):
"""study 1 : errors, expected exit_status = 1."""
# Build up arguments and run
print("===study 1")
args = ['--list-of-studies', 'test_data/study_es_1/',
'--portal_info_dir', PORTAL_INFO_DIR]
args = validateStudies.interface(args)
exit_status = validateStudies.main(args)
self.assertEqual(1, exit_status)
@unittest.skip("Study test_data/study_es_invalid is not implemented")
def test_exit_status_invalid(self):
"""test to fail: study directory not existing, so cannot run validation, expected exit_status = 1."""
# Build up arguments and run
print("===study invalid")
args = ['--list-of-studies', 'test_data/study_es_invalid/',
'--portal_info_dir', PORTAL_INFO_DIR]
args = validateStudies.interface(args)
exit_status = validateStudies.main(args)
self.assertEqual(1, exit_status)
def test_exit_status_warnings(self):
"""study 3 : warnings only, expected exit_status = 0."""
# Build up arguments and run
print("===study 3")
args = ['--list-of-studies', 'test_data/study_es_3/',
'--portal_info_dir', PORTAL_INFO_DIR]
args = validateStudies.interface(args)
exit_status = validateStudies.main(args)
self.assertEqual(0, exit_status)
def METHOD_NAME(self):
"""Running validateStudies for four studies tested above, expected exit_status = 1."""
# Build up arguments and run
print("===study0,1,invalid,3")
args = ['--root-directory', 'test_data',
'--list-of-studies', 'study_es_0,study_es_1,study_es_invalid,study_es_3',
'--portal_info_dir', PORTAL_INFO_DIR]
args = validateStudies.interface(args)
exit_status = validateStudies.main(args)
self.assertEqual(1, exit_status)
def test_logs_study_label_before_validation_messages(self):
"""The log file should start with a line describing the study.
A subsequent study should have its own header line.
"""
# given
with TemporaryDirectory() as out_dir_path:
args = [
'--root-directory', 'test_data',
'--list-of-studies', 'study_various_issues,study_es_0',
'--portal_info_dir', PORTAL_INFO_DIR,
'--html-folder', out_dir_path
]
# when
with redirect_stdout(StringIO()):
parsed_args = validateStudies.interface(args)
validateStudies.main(parsed_args)
# then
log_file_path = glob.glob(os.path.join(out_dir_path, 'log*.txt'))[0]
with open(log_file_path) as log_file:
log_file_lines = log_file.readlines()
self.assertIn('study_various_issues', log_file_lines[0])
last_line_of_first_study = next(
index
for index, line
in enumerate(log_file_lines)
if 'Validation complete' in line)
self.assertIn(
'study_es_0',
log_file_lines[last_line_of_first_study + 1])
class ValidateStudiesWithEagerlyFlushingCollapser(unittest.TestCase):
"""Test validation with the collapser flushing due to buffer capacity.
When validating very large studies, it will flush partway through a study.
This can be simulated with a smaller study by lowering the buffer capacity.
"""
def setUp(self):
"""Make the collapsing log message handler flush more eagerly."""
class EagerFlusher(logging.handlers.MemoryHandler):
def __init__(self, *args, **kwargs):
"""Set the buffer capacity to 3 regardless of args."""
# leave out any capacity argument from args and kwargs
args = args[1:]
kwargs = {k: v for k, v in list(kwargs.items()) if k != 'capacity'}
# pass 3 as the capacity argument
super(EagerFlusher, self).__init__(3, *args, **kwargs)
class EagerFlushingCollapser(
cbioportal_common.CollapsingLogMessageHandler,
EagerFlusher):
"""CollapsingLogMessageHandler with EagerFlusher overrides."""
pass
self.original_collapser = cbioportal_common.CollapsingLogMessageHandler
cbioportal_common.CollapsingLogMessageHandler = EagerFlusher
def tearDown(self):
"""Restore the unmodified collapsing log message handler."""
cbioportal_common.CollapsingLogMessageHandler = self.original_collapser
def test_leaves_stdout_uncluttered_if_validation_produces_errors(self):
"""Test flushing the collapsing logger halfway through a study.
This should not spill the validation messages to stdout as it previously
did, even crashing with a KeyError sometimes because non-validator
log messages got flushed into the collapsing logic.
"""
output_stream = StringIO()
with redirect_stdout(output_stream):
args = validateStudies.interface([
'--root-directory', 'test_data',
'--list-of-studies', 'study_various_issues/',
'--portal_info_dir', PORTAL_INFO_DIR])
validateStudies.main(args)
self.assertNotIn(
'ERROR',
output_stream.getvalue(),
'The validation errors should not be printed to the console.')
if __name__ == '__main__':
unittest.main(buffer=True) |
299,765 | get samples | """
dataset related classes and methods
"""
# pylint: disable=unused-argument,missing-docstring
import logging
import sys
import time
import cv2
import numpy as np
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("dataset")
class Item():
def __init__(self, label, img, idx):
self.label = label
self.img = img
self.idx = idx
self.start = time.time()
def usleep(sec):
if sys.platform == 'win32':
        # on windows time.sleep() doesn't work too well
import ctypes
kernel32 = ctypes.windll.kernel32
timer = kernel32.CreateWaitableTimerA(ctypes.c_void_p(), True, ctypes.c_void_p())
delay = ctypes.c_longlong(int(-1 * (10 * 1000000 * sec)))
kernel32.SetWaitableTimer(timer, ctypes.byref(delay), 0, ctypes.c_void_p(), ctypes.c_void_p(), False)
kernel32.WaitForSingleObject(timer, 0xffffffff)
else:
time.sleep(sec)
class Dataset():
def __init__(self):
self.arrival = None
self.image_list = []
self.label_list = []
self.image_list_inmemory = {}
self.last_loaded = -1
def preprocess(self, use_cache=True):
raise NotImplementedError("Dataset:preprocess")
def get_item_count(self):
return len(self.image_list)
def get_list(self):
raise NotImplementedError("Dataset:get_list")
def load_query_samples(self, sample_list):
self.image_list_inmemory = {}
for sample in sample_list:
self.image_list_inmemory[sample], _ = self.get_item(sample)
self.last_loaded = time.time()
def unload_query_samples(self, sample_list):
if sample_list:
for sample in sample_list:
if sample in self.image_list_inmemory :
del self.image_list_inmemory[sample]
else:
self.image_list_inmemory = {}
def METHOD_NAME(self, id_list):
data = np.array([self.image_list_inmemory[id] for id in id_list])
return data, self.label_list[id_list]
def get_item_loc(self, id):
raise NotImplementedError("Dataset:get_item_loc")
#
# Post processing
#
class PostProcessCommon:
def __init__(self, offset=0):
self.offset = offset
self.good = 0
self.total = 0
def __call__(self, results, ids, expected=None, result_dict=None):
processed_results = []
n = len(results[0])
for idx in range(0, n):
result = results[0][idx] + self.offset
processed_results.append([result])
if result == expected[idx]:
self.good += 1
self.total += n
return processed_results
def add_results(self, results):
pass
def start(self):
self.good = 0
self.total = 0
def finalize(self, results, ds=False, output_dir=None):
results["good"] = self.good
results["total"] = self.total
class PostProcessArgMax:
def __init__(self, offset=0):
self.offset = offset
self.good = 0
self.total = 0
def __call__(self, results, ids, expected=None, result_dict=None):
processed_results = []
results = np.argmax(results[0], axis=1)
n = results.shape[0]
for idx in range(0, n):
result = results[idx] + self.offset
processed_results.append([result])
if result == expected[idx]:
self.good += 1
self.total += n
return processed_results
def add_results(self, results):
pass
def start(self):
self.good = 0
self.total = 0
def finalize(self, results, ds=False, output_dir=None):
results["good"] = self.good
results["total"] = self.total
#
# pre-processing
#
def center_crop(img, out_height, out_width):
height, width, _ = img.shape
left = int((width - out_width) / 2)
right = int((width + out_width) / 2)
top = int((height - out_height) / 2)
bottom = int((height + out_height) / 2)
img = img[top:bottom, left:right]
return img
def resize_with_aspectratio(img, out_height, out_width, scale=87.5, inter_pol=cv2.INTER_LINEAR):
height, width, _ = img.shape
new_height = int(100. * out_height / scale)
new_width = int(100. * out_width / scale)
if height > width:
w = new_width
h = int(new_height * height / width)
else:
h = new_height
w = int(new_width * width / height)
img = cv2.resize(img, (w, h), interpolation=inter_pol)
return img
def pre_process_vgg(img, dims=None, need_transpose=False):
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
output_height, output_width, _ = dims
cv2_interpol = cv2.INTER_AREA
img = resize_with_aspectratio(img, output_height, output_width, inter_pol=cv2_interpol)
img = center_crop(img, output_height, output_width)
img = np.asarray(img, dtype='float32')
# normalize image
means = np.array([123.68, 116.78, 103.94], dtype=np.float32)
img -= means
# transpose if needed
if need_transpose:
img = img.transpose([2, 0, 1])
return img
def pre_process_mobilenet(img, dims=None, need_transpose=False):
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
output_height, output_width, _ = dims
img = resize_with_aspectratio(img, output_height, output_width, inter_pol=cv2.INTER_LINEAR)
img = center_crop(img, output_height, output_width)
img = np.asarray(img, dtype='float32')
img /= 255.0
img -= 0.5
img *= 2
# transpose if needed
if need_transpose:
img = img.transpose([2, 0, 1])
return img
def pre_process_imagenet_pytorch(img, dims=None, need_transpose=False):
from PIL import Image
import torchvision.transforms.functional as F
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = Image.fromarray(img)
img = F.resize(img, 256, Image.BILINEAR)
img = F.center_crop(img, 224)
img = F.to_tensor(img)
img = F.normalize(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], inplace=False)
if not need_transpose:
img = img.permute(1, 2, 0) # NHWC
img = np.asarray(img, dtype='float32')
return img
def maybe_resize(img, dims):
img = np.array(img, dtype=np.float32)
if len(img.shape) < 3 or img.shape[2] != 3:
# some images might be grayscale
img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
if dims != None:
im_height, im_width, _ = dims
img = cv2.resize(img, (im_width, im_height), interpolation=cv2.INTER_LINEAR)
return img
def pre_process_coco_mobilenet(img, dims=None, need_transpose=False):
img = maybe_resize(img, dims)
img = np.asarray(img, dtype=np.uint8)
# transpose if needed
if need_transpose:
img = img.transpose([2, 0, 1])
return img
def pre_process_coco_pt_mobilenet(img, dims=None, need_transpose=False):
img = maybe_resize(img, dims)
img -= 127.5
img /= 127.5
# transpose if needed
if need_transpose:
img = img.transpose([2, 0, 1])
return img
def pre_process_coco_resnet34(img, dims=None, need_transpose=False):
img = maybe_resize(img, dims)
mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)
std = np.array([0.229, 0.224, 0.225], dtype=np.float32)
img = img / 255. - mean
img = img / std
if need_transpose:
img = img.transpose([2, 0, 1])
return img
def pre_process_coco_resnet34_tf(img, dims=None, need_transpose=False):
img = maybe_resize(img, dims)
mean = np.array([123.68, 116.78, 103.94], dtype=np.float32)
img = img - mean
if need_transpose:
img = img.transpose([2, 0, 1])
return img
def pre_process_openimages_retinanet(img, dims=None, need_transpose=False):
img = maybe_resize(img, dims)
img /= 255.
# transpose if needed
if need_transpose:
img = img.transpose([2, 0, 1])
    return img
299,766 | is legacy state | #
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
import logging
from datetime import datetime, timedelta
from typing import Any, MutableMapping
from airbyte_cdk.sources.file_based.config.file_based_stream_config import FileBasedStreamConfig
from airbyte_cdk.sources.file_based.remote_file import RemoteFile
from airbyte_cdk.sources.file_based.stream.cursor import DefaultFileBasedCursor
from airbyte_cdk.sources.file_based.types import StreamState
logger = logging.Logger("source-S3")
class Cursor(DefaultFileBasedCursor):
_DATE_FORMAT = "%Y-%m-%d"
_LEGACY_DATE_TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
_V4_MIGRATION_BUFFER = timedelta(hours=1)
_V3_MIN_SYNC_DATE_FIELD = "v3_min_sync_date"
def __init__(self, stream_config: FileBasedStreamConfig, **_: Any):
super().__init__(stream_config)
self._running_migration = False
self._v3_migration_start_datetime = None
def set_initial_state(self, value: StreamState) -> None:
if self.METHOD_NAME(value):
self._running_migration = True
value = self._convert_legacy_state(value)
else:
self._running_migration = False
self._v3_migration_start_datetime = (
datetime.strptime(value.get(Cursor._V3_MIN_SYNC_DATE_FIELD), DefaultFileBasedCursor.DATE_TIME_FORMAT)
if Cursor._V3_MIN_SYNC_DATE_FIELD in value
else None
)
super().set_initial_state(value)
def get_state(self) -> StreamState:
state = {"history": self._file_to_datetime_history, self.CURSOR_FIELD: self._get_cursor()}
if self._v3_migration_start_datetime:
return {
**state,
**{
Cursor._V3_MIN_SYNC_DATE_FIELD: datetime.strftime(
self._v3_migration_start_datetime, DefaultFileBasedCursor.DATE_TIME_FORMAT
)
},
}
else:
return state
def _should_sync_file(self, file: RemoteFile, logger: logging.Logger) -> bool:
"""
Never sync files earlier than the v3 migration start date. V3 purged the history from the state, so we assume all files were already synced
        Else if the current sync is migrating from v3 to v4, sync all files that were modified within one hour of the last sync
Else sync according to the default logic
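
        Illustrative timeline (hypothetical values): if the v3 cursor was 2023-01-01T12:00:00Z,
        the migration start becomes 11:00:00Z after the one-hour buffer; a file last modified at
        10:30 is skipped, a file modified at 11:30 is synced during the migration run, and later
        runs fall back to the default history-based check.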
"""
if self._v3_migration_start_datetime and file.last_modified < self._v3_migration_start_datetime:
return False
elif self._running_migration:
return True
else:
return super()._should_sync_file(file, logger)
@staticmethod
def METHOD_NAME(value: StreamState) -> bool:
if not value:
return False
try:
# Verify datetime format in history
history = value.get("history", {}).keys()
if history:
item = list(value.get("history", {}).keys())[0]
datetime.strptime(item, Cursor._DATE_FORMAT)
# verify the format of the last_modified cursor
last_modified_at_cursor = value.get(DefaultFileBasedCursor.CURSOR_FIELD)
if not last_modified_at_cursor:
return False
datetime.strptime(last_modified_at_cursor, Cursor._LEGACY_DATE_TIME_FORMAT)
except ValueError:
return False
return True
@staticmethod
def _convert_legacy_state(legacy_state: StreamState) -> MutableMapping[str, Any]:
"""
Transform the history from the old state message format to the new.
e.g.
{
"2022-05-26": ["simple_test.csv.csv", "simple_test_2.csv"],
"2022-05-27": ["simple_test_2.csv", "redshift_result.csv"],
...
}
=>
{
"simple_test.csv": "2022-05-26T00:00:00.000000Z",
"simple_test_2.csv": "2022-05-27T00:00:00.000000Z",
"redshift_result.csv": "2022-05-27T00:00:00.000000Z",
...
}
"""
converted_history = {}
legacy_cursor = legacy_state[DefaultFileBasedCursor.CURSOR_FIELD]
cursor_datetime = datetime.strptime(legacy_cursor, Cursor._LEGACY_DATE_TIME_FORMAT)
logger.info(f"Converting v3 -> v4 state. v3_cursor={legacy_cursor} v3_history={legacy_state.get('history')}")
for date_str, filenames in legacy_state.get("history", {}).items():
datetime_obj = Cursor._get_adjusted_date_timestamp(cursor_datetime, datetime.strptime(date_str, Cursor._DATE_FORMAT))
for filename in filenames:
if filename in converted_history:
if datetime_obj > datetime.strptime(
converted_history[filename],
DefaultFileBasedCursor.DATE_TIME_FORMAT,
):
converted_history[filename] = datetime_obj.strftime(DefaultFileBasedCursor.DATE_TIME_FORMAT)
else:
# If the file was already synced with a later timestamp, ignore
pass
else:
converted_history[filename] = datetime_obj.strftime(DefaultFileBasedCursor.DATE_TIME_FORMAT)
if converted_history:
filename, _ = max(converted_history.items(), key=lambda x: (x[1], x[0]))
cursor = f"{cursor_datetime}_{filename}"
else:
# Having a cursor with empty history is not expected, but we handle it.
logger.warning(f"Cursor found without a history object; this is not expected. cursor_value={legacy_cursor}")
# Note: we convert to the v4 cursor granularity, but since no items are in the history we simply use the
# timestamp as the cursor value instead of the concatenation of timestamp_filename, which is the v4
# cursor format.
# This is okay because the v4 cursor is kept for posterity but is not actually used in the v4 code. If we
# start to use the cursor we may need to revisit this logic.
cursor = cursor_datetime
converted_history = {}
v3_migration_start_datetime = cursor_datetime - Cursor._V4_MIGRATION_BUFFER
return {
"history": converted_history,
DefaultFileBasedCursor.CURSOR_FIELD: cursor,
Cursor._V3_MIN_SYNC_DATE_FIELD: v3_migration_start_datetime.strftime(DefaultFileBasedCursor.DATE_TIME_FORMAT),
}
@staticmethod
def _get_adjusted_date_timestamp(cursor_datetime: datetime, file_datetime: datetime) -> datetime:
if file_datetime > cursor_datetime:
return file_datetime
else:
# Extract the dates so they can be compared
cursor_date = cursor_datetime.date()
date_obj = file_datetime.date()
# If same day, update the time to the cursor time
if date_obj == cursor_date:
return file_datetime.replace(hour=cursor_datetime.hour, minute=cursor_datetime.minute, second=cursor_datetime.second)
# If previous, update the time to end of day
else:
return file_datetime.replace(hour=23, minute=59, second=59, microsecond=999999) |
299,767 | synchronization | import time
import sys
import json
import ftrack_api
from openpype_modules.ftrack.lib import ServerAction
from openpype_modules.ftrack.lib.avalon_sync import SyncEntitiesFactory
class SyncToAvalonServer(ServerAction):
"""
Synchronizing data action - from Ftrack to Avalon DB
Stores all information about entity.
- Name(string) - Most important information = identifier of entity
- Parent(ObjectId) - Avalon Project Id, if entity is not project itself
- Data(dictionary):
- VisualParent(ObjectId) - Avalon Id of parent asset
- Parents(array of string) - All parent names except project
- Tasks(dictionary of dictionaries) - Tasks on asset
- FtrackId(string)
- entityType(string) - entity's type on Ftrack
* All Custom attributes in group 'Avalon'
- custom attributes that start with 'avalon_' are skipped
    * This information is stored for all entities in the whole project.
    The Avalon ID of an asset is stored to Ftrack
        - Custom attribute 'avalon_mongo_id'.
        - the action DOES NOT create this Custom attribute if it doesn't exist
- run 'Create Custom Attributes' action
- or do it manually (Not recommended)
"""
#: Action identifier.
identifier = "sync.to.avalon.server"
#: Action label.
label = "OpenPype Admin"
variant = "- Sync To Avalon (Server)"
#: Action description.
description = "Send data from Ftrack to Avalon"
role_list = {"Pypeclub", "Administrator", "Project Manager"}
settings_key = "sync_to_avalon"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.entities_factory = SyncEntitiesFactory(self.log, self.session)
def discover(self, session, entities, event):
""" Validation """
# Check if selection is valid
is_valid = False
for ent in event["data"]["selection"]:
# Ignore entities that are not tasks or projects
if ent["entityType"].lower() in ["show", "task"]:
is_valid = True
break
if is_valid:
is_valid = self.valid_roles(session, entities, event)
return is_valid
def launch(self, session, in_entities, event):
self.log.debug("{}: Creating job".format(self.label))
user_entity = session.query(
"User where id is {}".format(event["source"]["user"]["id"])
).one()
job_entity = session.create("Job", {
"user": user_entity,
"status": "running",
"data": json.dumps({
"description": "Sync to avalon is running..."
})
})
session.commit()
project_entity = self.get_project_from_entity(in_entities[0])
project_name = project_entity["full_name"]
try:
result = self.METHOD_NAME(event, project_name)
except Exception:
self.log.error(
"Synchronization failed due to code error", exc_info=True
)
description = "Sync to avalon Crashed (Download traceback)"
self.add_traceback_to_job(
job_entity, session, sys.exc_info(), description
)
msg = "An error has happened during synchronization"
title = "Synchronization report ({}):".format(project_name)
items = []
items.append({
"type": "label",
"value": "# {}".format(msg)
})
items.append({
"type": "label",
"value": (
"<p>Download report from job for more information.</p>"
)
})
report = {}
try:
report = self.entities_factory.report()
except Exception:
pass
_items = report.get("items") or []
if _items:
items.append(self.entities_factory.report_splitter)
items.extend(_items)
self.show_interface(items, title, event, submit_btn_label="Ok")
return {"success": True, "message": msg}
job_entity["status"] = "done"
job_entity["data"] = json.dumps({
"description": "Sync to avalon finished."
})
session.commit()
return result
def METHOD_NAME(self, event, project_name):
time_start = time.time()
self.show_message(event, "Synchronization - Preparing data", True)
try:
output = self.entities_factory.launch_setup(project_name)
if output is not None:
return output
time_1 = time.time()
self.entities_factory.set_cutom_attributes()
time_2 = time.time()
# This must happen before all filtering!!!
self.entities_factory.prepare_avalon_entities(project_name)
time_3 = time.time()
self.entities_factory.filter_by_ignore_sync()
time_4 = time.time()
self.entities_factory.duplicity_regex_check()
time_5 = time.time()
self.entities_factory.prepare_ftrack_ent_data()
time_6 = time.time()
self.entities_factory.synchronize()
time_7 = time.time()
self.log.debug(
"*** Synchronization finished ***"
)
self.log.debug(
"preparation <{}>".format(time_1 - time_start)
)
self.log.debug(
"set_cutom_attributes <{}>".format(time_2 - time_1)
)
self.log.debug(
"prepare_avalon_entities <{}>".format(time_3 - time_2)
)
self.log.debug(
"filter_by_ignore_sync <{}>".format(time_4 - time_3)
)
self.log.debug(
"duplicity_regex_check <{}>".format(time_5 - time_4)
)
self.log.debug(
"prepare_ftrack_ent_data <{}>".format(time_6 - time_5)
)
self.log.debug(
"synchronize <{}>".format(time_7 - time_6)
)
self.log.debug(
"* Total time: {}".format(time_7 - time_start)
)
if self.entities_factory.project_created:
event = ftrack_api.event.base.Event(
topic="openpype.project.created",
data={"project_name": project_name}
)
self.session.event_hub.publish(event)
report = self.entities_factory.report()
if report and report.get("items"):
default_title = "Synchronization report ({}):".format(
project_name
)
self.show_interface(
items=report["items"],
title=report.get("title", default_title),
event=event
)
return {
"success": True,
"message": "Synchronization Finished"
}
finally:
try:
self.entities_factory.dbcon.uninstall()
except Exception:
pass
try:
self.entities_factory.session.close()
except Exception:
pass
def register(session):
    '''Register plugin. Called when used as a plugin.'''
SyncToAvalonServer(session).register() |
299,768 | the history is the same as saved | # -*- coding: utf-8 -*-
"""
Copyright 2015 Telefonica Investigación y Desarrollo, S.A.U
This file is part of fiware-pep-steelskin
fiware-pep-steelskin is free software: you can redistribute it and/or
modify it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the License,
or (at your option) any later version.
fiware-pep-steelskin is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public
License along with fiware-pep-steelskin.
If not, see http://www.gnu.org/licenses/.
For those usages not covered by the GNU Affero General Public License
please contact with::[iot_support@tid.es]
"""
__author__ = 'Jon Calderin Goñi <jon.caldering@gmail.com>'
import requests
from lettuce import step, world
@step('the keystone proxy history reset')
def the_keystone_proxy_history_reset(step):
"""
Reset the history of the keystone proxy
:param step:
:return:
"""
requests.request('get', 'http://{ks_proxy_ip}:{ks_proxy_port}/reset_history'.format(ks_proxy_ip=world.ks_proxy_ip,
ks_proxy_port=world.ks_proxy_port))
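# Illustrative .feature scenario exercising the steps in this module (the scenario
# text itself is hypothetical; step phrases match the decorators in this file):
#
#   Given the keystone proxy history reset
#   And the history is saved
#   Then the history of petitions adds "1" petition
#   And the value added to the history is a request of the cache expired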
@step('the Keystone proxy receive the last petition "([^"]*)" from PEP')
def the_keystone_proxy_receive_the_last_petition_from_pep(step, last_petition):
"""
    Check if the last path the proxy received is the same as the given one
:param step:
:param last_petition:
:return:
"""
resp = requests.request('GET',
'http://{ks_proxy_ip}:{ks_proxy_port}/last_path'.format(ks_proxy_ip=world.ks_proxy_ip,
ks_proxy_port=world.ks_proxy_port)).text
    assert resp == last_petition, 'The last petition done to KS is not the one defined in the test, \n\tdefined: {done}\n\tdone: {resp}'.format(
resp=resp, done=last_petition)
@step('the access control proxy receive the last petition "([^"]*)" from PEP')
def the_access_control_proxy_receive_the_last_petition(step, last_petition):
"""
    Check the access control proxy and assert that the last petition given is the last petition asked by PEP to access control
:param step:
:param last_petition:
:return:
"""
resp = requests.request('GET', 'http://{ac_proxy_ip}:{ac_proxy_port}/last_path'.format(ac_proxy_ip=world.ac_proxy_ip, ac_proxy_port=world.ac_proxy_port)).text
    assert resp == last_petition, 'The last petition done to ac is not the one defined in the test'
@step('the history is saved$')
def the_history_is_saved(step):
"""
Get the keystone history from the proxy
:param step:
:return:
"""
resp = requests.request('GET', 'http://{ks_proxy_ip}:{ks_proxy_port}/history'.format(ks_proxy_ip=world.ks_proxy_ip, ks_proxy_port=world.ks_proxy_port)).text
world.history = resp
@step('the history is the same as saved')
def METHOD_NAME(step):
"""
Check the history saved has not changed
:param step:
:return:
"""
resp = requests.request('GET', 'http://{ks_proxy_ip}:{ks_proxy_port}/history'.format(ks_proxy_ip=world.ks_proxy_ip, ks_proxy_port=world.ks_proxy_port)).text
assert world.history == resp, 'The history changed, it has to be equal'
@step('the history of petitions adds "([^"]*)" petition')
def the_history_of_petitions_adds_a_petition(step, petitions_added):
"""
Check if the history has more petitions than before, when it was saved
:param step:
:param petitions_added:
:return:
"""
resp = requests.request('GET', 'http://{ks_proxy_ip}:{ks_proxy_port}/history'.format(ks_proxy_ip=world.ks_proxy_ip, ks_proxy_port=world.ks_proxy_port)).text
assert hasattr(world, 'history'), 'There is not a history saved in WORLD'
history_list = eval(world.history)
history_new_list = eval(resp)
world.last_petition_added = history_new_list[len(history_new_list)-1]
assert len(history_list)+int(petitions_added) == len(history_new_list), 'The petitions added to the history are not the expected'
@step('the value added to the history is a request of the cache expired')
def the_value_added_to_the_history_is_ok(step):
"""
Check if the last petition is the same as the new petition saved
:param step:
:return:
"""
assert world.new_petition == world.last_petition_added, 'The petition asked is not the expected' |
299,769 | add bento service servicer to server | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from bentoml.grpc.v1 import service_pb2 as bentoml_dot_grpc_dot_v1_dot_service__pb2
class BentoServiceStub(object):
"""a gRPC BentoServer.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Call = channel.unary_unary(
'/bentoml.grpc.v1.BentoService/Call',
request_serializer=bentoml_dot_grpc_dot_v1_dot_service__pb2.Request.SerializeToString,
response_deserializer=bentoml_dot_grpc_dot_v1_dot_service__pb2.Response.FromString,
)
self.ServiceMetadata = channel.unary_unary(
'/bentoml.grpc.v1.BentoService/ServiceMetadata',
request_serializer=bentoml_dot_grpc_dot_v1_dot_service__pb2.ServiceMetadataRequest.SerializeToString,
response_deserializer=bentoml_dot_grpc_dot_v1_dot_service__pb2.ServiceMetadataResponse.FromString,
)
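# Illustrative client-side use of the stub above (address and request contents are
# placeholders; this sketch is not part of the generated code):
#
#   import grpc
#   from bentoml.grpc.v1 import service_pb2
#
#   with grpc.insecure_channel("localhost:3000") as channel:
#       stub = BentoServiceStub(channel)
#       metadata = stub.ServiceMetadata(service_pb2.ServiceMetadataRequest())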
class BentoServiceServicer(object):
"""a gRPC BentoServer.
"""
def Call(self, request, context):
"""Call handles methodcaller of given API entrypoint.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ServiceMetadata(self, request, context):
"""ServiceMetadata returns metadata of bentoml.Service.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def METHOD_NAME(servicer, server):
rpc_method_handlers = {
'Call': grpc.unary_unary_rpc_method_handler(
servicer.Call,
request_deserializer=bentoml_dot_grpc_dot_v1_dot_service__pb2.Request.FromString,
response_serializer=bentoml_dot_grpc_dot_v1_dot_service__pb2.Response.SerializeToString,
),
'ServiceMetadata': grpc.unary_unary_rpc_method_handler(
servicer.ServiceMetadata,
request_deserializer=bentoml_dot_grpc_dot_v1_dot_service__pb2.ServiceMetadataRequest.FromString,
response_serializer=bentoml_dot_grpc_dot_v1_dot_service__pb2.ServiceMetadataResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'bentoml.grpc.v1.BentoService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
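# Illustrative server-side wiring for the add-servicer helper above (MyServicer, the
# port and the worker count are placeholders; this sketch is not part of the
# generated code):
#
#   from concurrent import futures
#   import grpc
#
#   server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
#   METHOD_NAME(MyServicer(), server)  # the helper defined above
#   server.add_insecure_port("[::]:3000")
#   server.start()
#   server.wait_for_termination()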
# This class is part of an EXPERIMENTAL API.
class BentoService(object):
"""a gRPC BentoServer.
"""
@staticmethod
def Call(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/bentoml.grpc.v1.BentoService/Call',
bentoml_dot_grpc_dot_v1_dot_service__pb2.Request.SerializeToString,
bentoml_dot_grpc_dot_v1_dot_service__pb2.Response.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ServiceMetadata(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/bentoml.grpc.v1.BentoService/ServiceMetadata',
bentoml_dot_grpc_dot_v1_dot_service__pb2.ServiceMetadataRequest.SerializeToString,
bentoml_dot_grpc_dot_v1_dot_service__pb2.ServiceMetadataResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata) |
299,770 | bootstrap runtime environment | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""An entry point for runtime environment. This must be kept independent of SageMaker PySDK"""
from __future__ import absolute_import
import argparse
import sys
import os
import shutil
import pathlib
if __package__ is None or __package__ == "":
from runtime_environment_manager import RuntimeEnvironmentManager, get_logger
else:
from sagemaker.remote_function.runtime_environment.runtime_environment_manager import (
RuntimeEnvironmentManager,
get_logger,
)
SUCCESS_EXIT_CODE = 0
DEFAULT_FAILURE_CODE = 1
REMOTE_FUNCTION_WORKSPACE = "sm_rf_user_ws"
BASE_CHANNEL_PATH = "/opt/ml/input/data"
FAILURE_REASON_PATH = "/opt/ml/output/failure"
PRE_EXECUTION_SCRIPT_NAME = "pre_exec.sh"
JOB_REMOTE_FUNCTION_WORKSPACE = "sagemaker_remote_function_workspace"
logger = get_logger()
def main():
"""Entry point for bootstrap script"""
exit_code = DEFAULT_FAILURE_CODE
try:
        args = _parse_args()
client_python_version = args.client_python_version
job_conda_env = args.job_conda_env
conda_env = job_conda_env or os.getenv("SAGEMAKER_JOB_CONDA_ENV")
RuntimeEnvironmentManager()._validate_python_version(client_python_version, conda_env)
METHOD_NAME(client_python_version, conda_env)
exit_code = SUCCESS_EXIT_CODE
except Exception as e: # pylint: disable=broad-except
logger.exception("Error encountered while bootstrapping runtime environment: %s", e)
_write_failure_reason_file(str(e))
finally:
sys.exit(exit_code)
def METHOD_NAME(
client_python_version: str,
conda_env: str = None,
):
"""Bootstrap runtime environment for remote function invocation
    Args:
        client_python_version (str): Python version of the client that created the remote function.
        conda_env (str): conda environment to be activated. Default is None.
"""
workspace_archive_dir_path = os.path.join(BASE_CHANNEL_PATH, REMOTE_FUNCTION_WORKSPACE)
if not os.path.exists(workspace_archive_dir_path):
logger.info(
"Directory '%s' does not exist. Assuming no dependencies to bootstrap.",
workspace_archive_dir_path,
)
return
# Unpack user workspace archive first.
workspace_archive_path = os.path.join(workspace_archive_dir_path, "workspace.zip")
if not os.path.isfile(workspace_archive_path):
logger.info(
"Workspace archive '%s' does not exist. Assuming no dependencies to bootstrap.",
            workspace_archive_path,
)
return
workspace_unpack_dir = pathlib.Path(os.getcwd()).absolute()
shutil.unpack_archive(filename=workspace_archive_path, extract_dir=workspace_unpack_dir)
logger.info("Successfully unpacked workspace archive at '%s'.", workspace_unpack_dir)
workspace_unpack_dir = pathlib.Path(workspace_unpack_dir, JOB_REMOTE_FUNCTION_WORKSPACE)
# Handle pre-execution commands
path_to_pre_exec_script = os.path.join(workspace_unpack_dir, PRE_EXECUTION_SCRIPT_NAME)
RuntimeEnvironmentManager().run_pre_exec_script(pre_exec_script_path=path_to_pre_exec_script)
# Handle dependencies file.
dependencies_file = None
for file in os.listdir(workspace_unpack_dir):
if file.endswith(".txt") or file.endswith(".yml") or file.endswith(".yaml"):
dependencies_file = os.path.join(workspace_unpack_dir, file)
break
if dependencies_file:
RuntimeEnvironmentManager().bootstrap(
local_dependencies_file=dependencies_file,
conda_env=conda_env,
client_python_version=client_python_version,
)
else:
logger.info(
"Did not find any dependency file in workspace directory at '%s'."
" Assuming no additional dependencies to install.",
workspace_archive_dir_path,
)
def _write_failure_reason_file(failure_msg):
"""Create a file 'failure' with failure reason written if bootstrap runtime env failed.
See: https://docs.aws.amazon.com/sagemaker/latest/dg/your-algorithms-training-algo.html
Args:
failure_msg: The content of file to be written.
"""
if not os.path.exists(FAILURE_REASON_PATH):
with open(FAILURE_REASON_PATH, "w") as f:
f.write("RuntimeEnvironmentError: " + failure_msg)
def _parse_args():
"""Parses CLI arguments."""
parser = argparse.ArgumentParser()
parser.add_argument("--job_conda_env", type=str)
parser.add_argument("--client_python_version")
args, _ = parser.parse_known_args()
return args
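# Illustrative invocation inside the job container (script name and values are
# assumptions; the launching job passes these flags when it starts the script):
#
#   python bootstrap_runtime_environment.py \
#       --client_python_version 3.10 \
#       --job_conda_env my_conda_env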
if __name__ == "__main__":
main() |
299,771 | data | #
# Copyright 2010 Free Software Foundation, Inc.
#
# This file was generated by gr_modtool, a tool from the GNU Radio framework
# This file is a part of gr-howto
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
"""
A base class is created.
Classes based upon this are used to make more user-friendly interfaces
to the doxygen xml docs than the generated classes provide.
"""
import os
import pdb
from xml.parsers.expat import ExpatError
from .generated import compound
class Base(object):
class Duplicate(Exception):
pass
class NoSuchMember(Exception):
pass
class ParsingError(Exception):
pass
def __init__(self, parse_data, top=None):
self._parsed = False
self._error = False
self._parse_data = parse_data
self._members = []
self._dict_members = {}
self._in_category = {}
self._data = {}
if top is not None:
self._xml_path = top._xml_path
# Set up holder of references
else:
top = self
self._refs = {}
self._xml_path = parse_data
self.top = top
@classmethod
def from_refid(cls, refid, top=None):
""" Instantiate class from a refid rather than parsing object. """
        # First check to see if it's already been instantiated.
if top is not None and refid in top._refs:
return top._refs[refid]
# Otherwise create a new instance and set refid.
inst = cls(None, top=top)
inst.refid = refid
inst.add_ref(inst)
return inst
@classmethod
def from_parse_data(cls, parse_data, top=None):
refid = getattr(parse_data, 'refid', None)
if refid is not None and top is not None and refid in top._refs:
return top._refs[refid]
inst = cls(parse_data, top=top)
if refid is not None:
inst.refid = refid
inst.add_ref(inst)
return inst
def add_ref(self, obj):
if hasattr(obj, 'refid'):
self.top._refs[obj.refid] = obj
mem_classes = []
def get_cls(self, mem):
for cls in self.mem_classes:
if cls.can_parse(mem):
return cls
raise Exception(("Did not find a class for object '%s'."
% (mem.get_name())))
def convert_mem(self, mem):
try:
cls = self.get_cls(mem)
converted = cls.from_parse_data(mem, self.top)
if converted is None:
raise Exception('No class matched this object.')
self.add_ref(converted)
return converted
except Exception as e:
print(e)
@classmethod
def includes(cls, inst):
return isinstance(inst, cls)
@classmethod
def can_parse(cls, obj):
return False
def _parse(self):
self._parsed = True
def _get_dict_members(self, cat=None):
"""
        For a given category a dictionary is returned mapping member names to
        members of that category. Names that are duplicated are mapped to the
        Duplicate exception class.
"""
self.confirm_no_error()
if cat not in self._dict_members:
new_dict = {}
for mem in self.in_category(cat):
if mem.name() not in new_dict:
new_dict[mem.name()] = mem
else:
new_dict[mem.name()] = self.Duplicate
self._dict_members[cat] = new_dict
return self._dict_members[cat]
def in_category(self, cat):
self.confirm_no_error()
if cat is None:
return self._members
if cat not in self._in_category:
self._in_category[cat] = [mem for mem in self._members
if cat.includes(mem)]
return self._in_category[cat]
def get_member(self, name, cat=None):
self.confirm_no_error()
# Check if it's in a namespace or class.
bits = name.split('::')
first = bits[0]
rest = '::'.join(bits[1:])
member = self._get_dict_members(cat).get(first, self.NoSuchMember)
# Raise any errors that are returned.
if member in set([self.NoSuchMember, self.Duplicate]):
raise member()
if rest:
return member.get_member(rest, cat=cat)
return member
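    # Illustrative lookup (names are hypothetical): get_member("outer::inner") first
    # resolves "outer" among this object's members, then recurses with "inner".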
def has_member(self, name, cat=None):
try:
mem = self.get_member(name, cat=cat)
return True
except self.NoSuchMember:
return False
def METHOD_NAME(self):
self.confirm_no_error()
return self._data
def members(self):
self.confirm_no_error()
return self._members
def process_memberdefs(self):
mdtss = []
for sec in self._retrieved_data.compounddef.sectiondef:
mdtss += sec.memberdef
# At the moment we lose all information associated with sections.
# Sometimes a memberdef is in several sectiondef.
# We make sure we don't get duplicates here.
uniques = set([])
for mem in mdtss:
converted = self.convert_mem(mem)
pair = (mem.name, mem.__class__)
if pair not in uniques:
uniques.add(pair)
self._members.append(converted)
def retrieve_data(self):
filename = os.path.join(self._xml_path, self.refid + '.xml')
try:
self._retrieved_data = compound.parse(filename)
except ExpatError:
print('Error in xml in file %s' % filename)
self._error = True
self._retrieved_data = None
def check_parsed(self):
if not self._parsed:
self._parse()
def confirm_no_error(self):
self.check_parsed()
if self._error:
raise self.ParsingError()
def error(self):
self.check_parsed()
return self._error
def name(self):
# first see if we can do it without processing.
if self._parse_data is not None:
return self._parse_data.name
self.check_parsed()
return self._retrieved_data.compounddef.name |
299,772 | backup name | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'ListWebAppBackupConfigurationResult',
'AwaitableListWebAppBackupConfigurationResult',
'list_web_app_backup_configuration',
'list_web_app_backup_configuration_output',
]
@pulumi.output_type
class ListWebAppBackupConfigurationResult:
"""
Description of a backup which will be performed.
"""
def __init__(__self__, METHOD_NAME=None, backup_schedule=None, databases=None, enabled=None, id=None, kind=None, name=None, storage_account_url=None, system_data=None, type=None):
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'backup_name' to be a str")
pulumi.set(__self__, "backup_name", METHOD_NAME)
if backup_schedule and not isinstance(backup_schedule, dict):
raise TypeError("Expected argument 'backup_schedule' to be a dict")
pulumi.set(__self__, "backup_schedule", backup_schedule)
if databases and not isinstance(databases, list):
raise TypeError("Expected argument 'databases' to be a list")
pulumi.set(__self__, "databases", databases)
if enabled and not isinstance(enabled, bool):
raise TypeError("Expected argument 'enabled' to be a bool")
pulumi.set(__self__, "enabled", enabled)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if storage_account_url and not isinstance(storage_account_url, str):
raise TypeError("Expected argument 'storage_account_url' to be a str")
pulumi.set(__self__, "storage_account_url", storage_account_url)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="backupName")
def METHOD_NAME(self) -> Optional[str]:
"""
Name of the backup.
"""
return pulumi.get(self, "backup_name")
@property
@pulumi.getter(name="backupSchedule")
def backup_schedule(self) -> Optional['outputs.BackupScheduleResponse']:
"""
Schedule for the backup if it is executed periodically.
"""
return pulumi.get(self, "backup_schedule")
@property
@pulumi.getter
def databases(self) -> Optional[Sequence['outputs.DatabaseBackupSettingResponse']]:
"""
Databases included in the backup.
"""
return pulumi.get(self, "databases")
@property
@pulumi.getter
def enabled(self) -> Optional[bool]:
"""
True if the backup schedule is enabled (must be included in that case), false if the backup schedule should be disabled.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Kind of resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource Name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="storageAccountUrl")
def storage_account_url(self) -> str:
"""
SAS URL to the container.
"""
return pulumi.get(self, "storage_account_url")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
The system metadata relating to this resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableListWebAppBackupConfigurationResult(ListWebAppBackupConfigurationResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListWebAppBackupConfigurationResult(
METHOD_NAME=self.METHOD_NAME,
backup_schedule=self.backup_schedule,
databases=self.databases,
enabled=self.enabled,
id=self.id,
kind=self.kind,
name=self.name,
storage_account_url=self.storage_account_url,
system_data=self.system_data,
type=self.type)
def list_web_app_backup_configuration(name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListWebAppBackupConfigurationResult:
"""
Gets the backup configuration of an app.
:param str name: Name of the app.
:param str resource_group_name: Name of the resource group to which the resource belongs.
"""
__args__ = dict()
__args__['name'] = name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:web/v20201001:listWebAppBackupConfiguration', __args__, opts=opts, typ=ListWebAppBackupConfigurationResult).value
return AwaitableListWebAppBackupConfigurationResult(
METHOD_NAME=pulumi.get(__ret__, 'backup_name'),
backup_schedule=pulumi.get(__ret__, 'backup_schedule'),
databases=pulumi.get(__ret__, 'databases'),
enabled=pulumi.get(__ret__, 'enabled'),
id=pulumi.get(__ret__, 'id'),
kind=pulumi.get(__ret__, 'kind'),
name=pulumi.get(__ret__, 'name'),
storage_account_url=pulumi.get(__ret__, 'storage_account_url'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type'))
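# Example usage (resource names are hypothetical; shown only to illustrate the call):
#
#   config = list_web_app_backup_configuration(
#       name="my-web-app",
#       resource_group_name="my-resource-group",
#   )
#   backup_url = config.storage_account_url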
@_utilities.lift_output_func(list_web_app_backup_configuration)
def list_web_app_backup_configuration_output(name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListWebAppBackupConfigurationResult]:
"""
Gets the backup configuration of an app.
:param str name: Name of the app.
:param str resource_group_name: Name of the resource group to which the resource belongs.
"""
... |
299,773 | compute query dict | import cudf
import dask_cudf
import logging
import panel as pn
from bokeh.models import ColumnDataSource
from panel.config import panel_extension
from typing import Dict, Literal
from ...assets import datetime as dt
class BaseChart:
chart_type: str = None
x: str = None
y: str = None
aggregate_fn: str = "count"
color: str = None
add_interaction: bool = True
chart = None
source = None
source_backup = None
data_points: int = 0
filter_widget = None
_library_specific_params: Dict[str, str] = {}
stride = None
stride_type = int
min_value: float = 0.0
max_value: float = 0.0
x_label_map = None
y_label_map = None
_initialized = False
    # is_widget=False charts can only be rendered in the main layout
is_widget = False
title = ""
_renderer_mode: Literal["web-app", "notebook"] = "web-app"
@property
def renderer_mode(self):
return self._renderer_mode
@renderer_mode.setter
def renderer_mode(self, value):
valid_values = ["web-app", "notebook"]
if value not in valid_values:
raise ValueError(
f"""Invalid value '{value}'. Value must be one of
{valid_values}."""
)
self._renderer_mode = value
@property
def name(self):
chart_type = self.chart_type if self.chart_type else "chart"
return f"{self.x}_{chart_type}_{self.title}"
@property
def library_specific_params(self):
return self._library_specific_params
@property
def x_dtype(self):
if isinstance(self.source, ColumnDataSource):
return self.source.data[self.data_x_axis].dtype
elif isinstance(self.source, (cudf.DataFrame, dask_cudf.DataFrame)):
return self.source[self.x].dtype
return None
@property
def y_dtype(self):
if isinstance(self.source, ColumnDataSource):
            return self.source.data[self.data_y_axis].dtype
elif isinstance(self.source, (cudf.DataFrame, dask_cudf.DataFrame)):
return self.source[self.y].dtype
return None
@library_specific_params.setter
def library_specific_params(self, value):
self._library_specific_params = value
self.extract_mappers()
self.set_color()
def set_color(self):
if "color" in self.library_specific_params:
self.color = self.library_specific_params["color"]
def extract_mappers(self):
if "x_label_map" in self.library_specific_params:
self.x_label_map = self.library_specific_params["x_label_map"]
self.library_specific_params.pop("x_label_map")
if "y_label_map" in self.library_specific_params:
self.y_label_map = self.library_specific_params["y_label_map"]
self.library_specific_params.pop("y_label_map")
def _repr_mimebundle_(self, include=None, exclude=None):
view = self.view()
if self._initialized and panel_extension._loaded:
return view._repr_mimebundle_(include, exclude)
if self._initialized is False:
logging.warning(
"dashboard has not been initialized."
"Please run cuxfilter.dashboard.Dashboard([...charts])"
" to view this object in notebook"
)
if panel_extension._loaded is False:
logging.warning(
"notebooks assets not loaded."
"Please run cuxfilter.load_notebooks_assets()"
" to view this object in notebook"
)
if isinstance(view, pn.Column):
return view.pprint()
return None
def _to_xaxis_type(self, dates):
"""
Description: convert to int64 if self.x_dtype is of type datetime
-----------------------------------------------------------------
Input:
dates: cudf.Series | list | tuple
"""
return dt.to_int64_if_datetime(dates, self.x_dtype)
def _to_yaxis_type(self, dates):
"""
Description: convert to int64 if self.y_dtype is of type datetime
-----------------------------------------------------------------
Input:
dates: cudf.Series | list | tuple
"""
return dt.to_int64_if_datetime(dates, self.y_dtype)
def _xaxis_dt_transform(self, dates):
"""
Description: convert to datetime64 if self.x_dtype is of type datetime
-----------------------------------------------------------------
Input:
dates: list | tuple of integer timestamps objects
"""
return dt.to_dt_if_datetime(dates, self.x_dtype)
def _yaxis_dt_transform(self, dates):
"""
Description: convert to datetime64 if self.y_dtype is of type datetime
-----------------------------------------------------------------
Input:
dates: list | tuple of integer timestamps objects
"""
return dt.to_dt_if_datetime(dates, self.y_dtype)
def _xaxis_np_dt64_transform(self, dates):
"""
Description: convert to datetime64 if self.x_dtype is of type datetime
-----------------------------------------------------------------
Input:
dates: list | tuple of datetime.datetime objects
"""
return dt.to_np_dt64_if_datetime(dates, self.x_dtype)
def _yaxis_np_dt64_transform(self, dates):
"""
Description: convert to datetime64 if self.y_dtype is of type datetime
-----------------------------------------------------------------
Input:
dates: list | tuple of datetime.datetime objects
"""
return dt.to_np_dt64_if_datetime(dates, self.y_dtype)
def _xaxis_stride_type_transform(self, stride_type):
"""
Description: return stride_type=CUDF_TIMEDELTA_TYPE if self.x_dtype is
of type datetime, else return stride_type
"""
return dt.transform_stride_type(stride_type, self.x_dtype)
def _yaxis_stride_type_transform(self, stride_type):
"""
Description: return stride_type=CUDF_TIMEDELTA_TYPE if self.y_dtype is
of type datetime else return stride_type
"""
return dt.transform_stride_type(stride_type, self.y_dtype)
def view(self, width=600, height=400):
return pn.panel(self.chart, width=width, height=height)
def get_dashboard_view(self):
return self.view()
def calculate_source(self, data):
print("base calc source function, to over-ridden by delegated classes")
return -1
def generate_chart(self, **kwargs):
print("base calc source function, to over-ridden by delegated classes")
return -1
def add_reset_event(self, callback=None):
print("base calc source function, to over-ridden by delegated classes")
return -1
def METHOD_NAME(self, query_dict):
print("base calc source function, to over-ridden by delegated classes")
return -1
def reset_chart(self, data: list = []):
print("base calc source function, to over-ridden by delegated classes")
return -1
def reload_chart(self, data):
print("base calc source function, to over-ridden by delegated classes")
return -1
def format_source_data(self, source_dict):
""""""
# print('function to be overridden by library specific extensions')
return -1
def apply_mappers(self):
""""""
# print('function to be overridden by library specific extensions')
return -1 |
299,774 | test to order first | #!/usr/bin/env python3
###############################################################################
# #
# RMG - Reaction Mechanism Generator #
# #
# Copyright (c) 2002-2023 Prof. William H. Green (whgreen@mit.edu), #
# Prof. Richard H. West (r.west@neu.edu) and the RMG Team (rmg_dev@mit.edu) #
# #
# Permission is hereby granted, free of charge, to any person obtaining a #
# copy of this software and associated documentation files (the 'Software'), #
# to deal in the Software without restriction, including without limitation #
# the rights to use, copy, modify, merge, publish, distribute, sublicense, #
# and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in #
# all copies or substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER #
# DEALINGS IN THE SOFTWARE. #
# #
###############################################################################
"""
This script contains unit tests of the :mod:`rmgpy.kinetics.model` module.
"""
from rmgpy.kinetics.model import (
get_reaction_order_from_rate_coefficient_units,
get_rate_coefficient_units_from_reaction_order,
KineticsModel,
)
from rmgpy.kinetics.uncertainties import RateUncertainty
class TestKineticsModel:
"""
Contains unit tests of the KineticsModel class
"""
def setup_class(self):
self.Tmin = 300.0
self.Tmax = 3000.0
self.Pmin = 0.1
self.Pmax = 100.0
self.comment = "foo bar"
self.uncertainty = RateUncertainty(mu=0.3, var=0.6, Tref=1000.0, N=1, correlation="ab")
self.km = KineticsModel(
Tmin=(self.Tmin, "K"),
Tmax=(self.Tmax, "K"),
Pmin=(self.Pmin, "bar"),
Pmax=(self.Pmax, "bar"),
uncertainty=self.uncertainty,
comment=self.comment,
)
def test_is_identical_to(self):
"""
Test that the KineticsModel.is_identical_to method works on itself.
This just checks the Temperature range
"""
assert self.km.is_identical_to(self.km)
import copy
km = copy.deepcopy(self.km)
        assert self.km.is_identical_to(km)
km.Tmax = (self.Tmax - 50, "K") # discrepancy must be more than 1%!
assert not self.km.is_identical_to(km)
def test_repr(self):
"""
Test that an KineticsModel object can be reconstructed from its repr()
output with no loss of information.
"""
namespace = {}
exec("km = {0!r}".format(self.km), globals(), namespace)
assert "km" in namespace
km = namespace["km"]
assert self.km.is_identical_to(km)
assert dir(self.km) == dir(km)
for att in "Tmax Tmin Pmax Pmin comment uncertainty".split():
assert repr(getattr(self.km, att)) == repr(getattr(km, att))
def test_pickle(self):
"""
Test that an KineticsModel object can be pickled and unpickled
with no loss of information.
"""
import pickle
km = pickle.loads(pickle.dumps(self.km, -1))
assert self.km.is_identical_to(km)
assert dir(self.km) == dir(km)
for att in "Tmax Tmin Pmax Pmin comment uncertainty".split():
assert repr(getattr(self.km, att)) == repr(getattr(km, att))
class TestOrder:
"""
Contains unit tests of the functions for converting rate coefficient units
to/from reaction orders.
"""
def test_to_order_zeroth(self):
"""
Test the conversion of zeroth-order rate coefficient units to an integer
reaction order.
"""
assert 0 == get_reaction_order_from_rate_coefficient_units("mol/(m^3*s)")
assert 0 == get_reaction_order_from_rate_coefficient_units("mol/(cm^3*s)")
assert 0 == get_reaction_order_from_rate_coefficient_units("molecule/(m^3*s)")
assert 0 == get_reaction_order_from_rate_coefficient_units("molecule/(cm^3*s)")
def METHOD_NAME(self):
"""
Test the conversion of first-order rate coefficient units to an integer
reaction order.
"""
assert 1 == get_reaction_order_from_rate_coefficient_units("s^-1")
def test_to_order_second(self):
"""
Test the conversion of second-order rate coefficient units to an integer
reaction order.
"""
assert 2 == get_reaction_order_from_rate_coefficient_units("m^3/(mol*s)")
assert 2 == get_reaction_order_from_rate_coefficient_units("cm^3/(mol*s)")
assert 2 == get_reaction_order_from_rate_coefficient_units("m^3/(molecule*s)")
assert 2 == get_reaction_order_from_rate_coefficient_units("cm^3/(molecule*s)")
def test_to_order_third(self):
"""
Test the conversion of third-order rate coefficient units to an integer
reaction order.
"""
assert 3 == get_reaction_order_from_rate_coefficient_units("m^6/(mol^2*s)")
assert 3 == get_reaction_order_from_rate_coefficient_units("cm^6/(mol^2*s)")
assert 3 == get_reaction_order_from_rate_coefficient_units("m^6/(molecule^2*s)")
assert 3 == get_reaction_order_from_rate_coefficient_units("cm^6/(molecule^2*s)")
def test_to_units_zeroth(self):
"""
Test the conversion of a reaction order of zero to rate coefficient
units.
"""
assert "mol/(m^3*s)" == get_rate_coefficient_units_from_reaction_order(0)
def test_to_units_first(self):
"""
Test the conversion of a reaction order of one to rate coefficient
units.
"""
assert "s^-1" == get_rate_coefficient_units_from_reaction_order(1)
def test_to_units_second(self):
"""
Test the conversion of a reaction order of two to rate coefficient
units.
"""
assert "m^3/(mol*s)" == get_rate_coefficient_units_from_reaction_order(2)
def test_to_units_third(self):
"""
Test the conversion of a reaction order of three to rate coefficient
units.
"""
assert "m^6/(mol^2*s)" == get_rate_coefficient_units_from_reaction_order(3) |
299,775 | generate perf endpoints | from lnst.Common.Parameters import (
Param,
IntParam,
IPv4NetworkParam,
IPv6NetworkParam,
)
from lnst.Common.IpAddress import interface_addresses
from lnst.Controller import HostReq, DeviceReq, RecipeParam
from lnst.Recipes.ENRT.VirtualEnrtRecipe import VirtualEnrtRecipe
from lnst.Recipes.ENRT.ConfigMixins.OffloadSubConfigMixin import (
OffloadSubConfigMixin)
from lnst.Recipes.ENRT.ConfigMixins.CommonHWSubConfigMixin import (
CommonHWSubConfigMixin)
from lnst.RecipeCommon.Ping.PingEndpoints import PingEndpoints
from lnst.Devices import VlanDevice
from lnst.Devices import BridgeDevice
class VirtualBridgeVlanInHostRecipe(CommonHWSubConfigMixin,
OffloadSubConfigMixin, VirtualEnrtRecipe):
host1 = HostReq()
host1.eth0 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))
host1.tap0 = DeviceReq(label="to_guest")
host2 = HostReq()
host2.eth0 = DeviceReq(label="to_switch", driver=RecipeParam("driver"))
guest1 = HostReq()
guest1.eth0 = DeviceReq(label="to_guest")
vlan_id = IntParam(default=10)
offload_combinations = Param(default=(
dict(gro="on", gso="on", tso="on", tx="on", rx="on"),
dict(gro="off", gso="on", tso="on", tx="on", rx="on"),
dict(gro="on", gso="off", tso="off", tx="on", rx="on"),
dict(gro="on", gso="on", tso="off", tx="off", rx="on"),
dict(gro="on", gso="on", tso="on", tx="on", rx="off")))
net_ipv4 = IPv4NetworkParam(default="192.168.10.0/24")
net_ipv6 = IPv6NetworkParam(default="fc00:0:0:1::/64")
def test_wide_configuration(self):
host1, host2, guest1 = (self.matched.host1, self.matched.host2,
self.matched.guest1)
host1.eth0.down()
host1.tap0.down()
host1.br0 = BridgeDevice()
host1.br0.slave_add(host1.tap0)
host2.eth0.down()
guest1.eth0.down()
host1.vlan0 = VlanDevice(realdev=host1.eth0, vlan_id=self.params.vlan_id,
master=host1.br0)
host2.vlan0 = VlanDevice(realdev=host2.eth0, vlan_id=self.params.vlan_id)
configuration = super().test_wide_configuration()
configuration.test_wide_devices = [guest1.eth0, host2.vlan0,
host1.br0]
ipv4_addr = interface_addresses(self.params.net_ipv4)
ipv6_addr = interface_addresses(self.params.net_ipv6, default_start="fc00:0:0:1::2/64")
host1.br0.ip_add(next(ipv4_addr))
for i, dev in enumerate([host2.vlan0, guest1.eth0]):
dev.ip_add(next(ipv4_addr))
dev.ip_add(next(ipv6_addr))
for dev in [host1.eth0, host1.tap0, host1.vlan0, host1.br0,
host2.eth0, host2.vlan0, guest1.eth0]:
dev.up()
self.wait_tentative_ips(configuration.test_wide_devices)
return configuration
def generate_test_wide_description(self, config):
host1, host2 = self.matched.host1, self.matched.host2
desc = super().generate_test_wide_description(config)
desc += [
"\n".join([
"Configured {}.{}.ips = {}".format(
dev.host.hostid, dev.name, dev.ips
)
for dev in config.test_wide_devices
]),
"\n".join([
"Configured {}.{}.vlan_id = {}".format(
dev.host.hostid, dev.name, dev.vlan_id
)
for dev in [host1.vlan0, host2.vlan0]
]),
"\n".join([
"Configured {}.{}.realdev = {}".format(
dev.host.hostid, dev.name,
'.'.join([dev.host.hostid, dev.realdev.name])
)
for dev in [host1.vlan0, host2.vlan0]
]),
"Configured {}.{}.slaves = {}".format(
host1.hostid, host1.br0.name,
['.'.join([host1.hostid, slave.name])
for slave in host1.br0.slaves]
)
]
return desc
def test_wide_deconfiguration(self, config):
del config.test_wide_devices
super().test_wide_deconfiguration(config)
def generate_ping_endpoints(self, config):
return [PingEndpoints(self.matched.guest1.eth0, self.matched.host2.vlan0)]
def METHOD_NAME(self, config):
return [(self.matched.guest1.eth0, self.matched.host2.vlan0)]
@property
def offload_nics(self):
return [self.matched.host1.eth0, self.matched.host2.eth0,
self.matched.guest1.eth0]
@property
def mtu_hw_config_dev_list(self):
host1, host2, guest1 = (self.matched.host1, self.matched.host2,
self.matched.guest1)
result = []
for dev in [host1.eth0, host1.tap0, host1.br0, host2.eth0,
guest1.eth0, host1.vlan0, host2.vlan0]:
result.append(dev)
return result
@property
def dev_interrupt_hw_config_dev_list(self):
return [self.matched.host1.eth0, self.matched.host2.eth0]
@property
def parallel_stream_qdisc_hw_config_dev_list(self):
return [self.matched.host1.eth0, self.matched.host2.eth0] |
299,776 | std | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..arithmetic.sqrt import sqrt
from .var import var
def METHOD_NAME(a, axis=None, dtype=None, out=None, ddof=0, keepdims=None, combine_size=None):
"""
Compute the standard deviation along the specified axis.
Returns the standard deviation, a measure of the spread of a distribution,
of the tensor elements. The standard deviation is computed for the
flattened tensor by default, otherwise over the specified axis.
Parameters
----------
a : array_like
Calculate the standard deviation of these values.
axis : None or int or tuple of ints, optional
Axis or axes along which the standard deviation is computed. The
default is to compute the standard deviation of the flattened tensor.
If this is a tuple of ints, a standard deviation is performed over
multiple axes, instead of a single axis or all the axes as before.
dtype : dtype, optional
Type to use in computing the standard deviation. For tensors of
integer type the default is float64, for tensors of float types it is
the same as the array type.
out : Tensor, optional
Alternative output tensor in which to place the result. It must have
the same shape as the expected output but the type (of the calculated
values) will be cast if necessary.
ddof : int, optional
Means Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
By default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input tensor.
If the default value is passed, then `keepdims` will not be
passed through to the `std` method of sub-classes of
`Tensor`, however any non-default value will be. If the
sub-classes `sum` method does not implement `keepdims` any
exceptions will be raised.
combine_size: int, optional
The number of chunks to combine.
Returns
-------
standard_deviation : Tensor, see dtype parameter above.
If `out` is None, return a new tensor containing the standard deviation,
otherwise return a reference to the output array.
See Also
--------
var, mean, nanmean, nanstd, nanvar
Notes
-----
The standard deviation is the square root of the average of the squared
deviations from the mean, i.e., ``std = sqrt(mean(abs(x - x.mean())**2))``.
The average squared deviation is normally calculated as
``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is specified,
the divisor ``N - ddof`` is used instead. In standard statistical
practice, ``ddof=1`` provides an unbiased estimator of the variance
of the infinite population. ``ddof=0`` provides a maximum likelihood
estimate of the variance for normally distributed variables. The
standard deviation computed in this function is the square root of
the estimated variance, so even with ``ddof=1``, it will not be an
unbiased estimate of the standard deviation per se.
Note that, for complex numbers, `std` takes the absolute
value before squaring, so that the result is always real and nonnegative.
For floating-point input, the *std* is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for float32 (see example below).
Specifying a higher-accuracy accumulator using the `dtype` keyword can
alleviate this issue.
Examples
--------
>>> import mars.tensor as mt
>>> a = mt.array([[1, 2], [3, 4]])
>>> mt.std(a).execute()
1.1180339887498949
>>> mt.std(a, axis=0).execute()
array([ 1., 1.])
>>> mt.std(a, axis=1).execute()
array([ 0.5, 0.5])
In single precision, std() can be inaccurate:
>>> a = mt.zeros((2, 512*512), dtype=mt.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> mt.std(a).execute()
0.45000005
Computing the standard deviation in float64 is more accurate:
>>> mt.std(a, dtype=mt.float64).execute()
0.44999999925494177
"""
ret = sqrt(
var(
a,
axis=axis,
dtype=dtype,
out=out,
ddof=ddof,
keepdims=keepdims,
combine_size=combine_size,
)
)
if dtype is not None and ret.dtype != dtype:
ret = ret.astype(dtype)
return ret |
299,777 | setup cli | # This file is part of Indico.
# Copyright (C) 2002 - 2023 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import os
import sys
from functools import partial
import alembic.command
import click
from flask import current_app
from flask.cli import with_appcontext
from flask_migrate.cli import db as flask_migrate_cli
import indico
from indico.cli.core import cli_group
from indico.core.db import db
from indico.core.db.sqlalchemy.migration import PluginScriptDirectory, migrate, prepare_db
from indico.core.db.sqlalchemy.util.management import get_all_tables
from indico.core.plugins import plugin_engine
from indico.util.console import cformat
@cli_group()
@click.option('--plugin', metavar='PLUGIN', help='Execute the command for the given plugin')
@click.option('--all-plugins', is_flag=True, help='Execute the command for all plugins')
@click.pass_context
@with_appcontext
def cli(ctx, plugin=None, all_plugins=False):
if plugin and all_plugins:
raise click.BadParameter('cannot combine --plugin and --all-plugins')
if all_plugins and ctx.invoked_subcommand in ('migrate', 'revision', 'downgrade', 'stamp', 'edit'):
raise click.UsageError('this command requires an explicit plugin')
if (all_plugins or plugin) and ctx.invoked_subcommand == 'prepare':
raise click.UsageError('this command is not available for plugins (use `upgrade` instead)')
if plugin and not plugin_engine.get_plugin(plugin):
raise click.BadParameter('plugin does not exist or is not loaded', param_hint='plugin')
migrate.init_app(current_app, db, os.path.join(current_app.root_path, 'migrations'))
@cli.command()
@click.option('--force', is_flag=True, help='Force using an older Postgres version')
def prepare(force=False):
"""Initialize a new database (creates tables, sets alembic rev to HEAD)."""
if not prepare_db(force=force):
sys.exit(1)
def _stamp(plugin=None, revision=None):
table = 'alembic_version' if not plugin else f'alembic_version_plugin_{plugin}'
db.session.execute(f'DELETE FROM {table}')
if revision:
db.session.execute(f'INSERT INTO {table} VALUES (:revision)', {'revision': revision})
@cli.command()
def reset_alembic():
"""Reset the alembic state carried over from 1.9.x.
Only run this command right after upgrading from a 1.9.x version
so the references to old alembic revisions (which were removed in
2.0) are reset.
"""
tables = get_all_tables(db)['public']
if 'alembic_version' not in tables:
print('No alembic_version table found')
sys.exit(1)
current_revs = [rev for rev, in db.session.execute('SELECT version_num FROM alembic_version').fetchall()]
if current_revs != ['65c079b091bf']:
print('Your database is not at the latest 1.9.11 revision (got [{}], expected [65c079b091bf]).'
.format(', '.join(current_revs)))
print('This can have multiple reasons:')
print('1) You did not upgrade from 1.9.x, so you do not need this command')
print('2) You have already executed the script')
print('3) You did not fully upgrade to the latest 1.9.11 revision before upgrading to 2.x')
print('In case of (3), you need to install v1.9.11 and then upgrade the database before updating Indico back '
'to {}'.format(indico.__version__))
sys.exit(1)
plugins = sorted(x[23:] for x in tables if x.startswith('alembic_version_plugin_'))
print('Resetting core alembic state...')
_stamp()
print('Plugins found: {}'.format(', '.join(plugins)))
no_revision_plugins = {'audiovisual', 'payment_cern'}
for plugin in no_revision_plugins:
# All revisions were just data migrations -> get rid of them
if plugin not in plugins:
continue
print(f'[{plugin}] Deleting revision table')
db.session.execute(f'DROP TABLE alembic_version_plugin_{plugin}')
plugin_revisions = {'chat': '3888761f35f7',
'livesync': 'aa0dbc6c14aa',
'outlook': '6093a83228a7',
'vc_vidyo': '6019621fea50'}
for plugin, revision in plugin_revisions.items():
if plugin not in plugins:
continue
print(f'[{plugin}] Stamping to new revision')
_stamp(plugin, revision)
db.session.commit()
def _safe_downgrade(*args, **kwargs):
func = kwargs.pop('_func')
print(cformat('%{yellow!}*** DANGER'))
print(cformat('%{yellow!}***%{reset} '
'%{red!}This operation may %{yellow!}PERMANENTLY ERASE %{red!}some data!%{reset}'))
if current_app.debug:
skip_confirm = os.environ.get('INDICO_ALWAYS_DOWNGRADE', '').lower() in ('1', 'yes')
print(cformat('%{yellow!}***%{reset} '
"%{green!}Debug mode is active, so you probably won't destroy valuable data"))
else:
skip_confirm = False
print(cformat('%{yellow!}***%{reset} '
'%{red!}Debug mode is NOT ACTIVE, so make sure you are on the right machine!'))
if not skip_confirm and input(cformat('%{yellow!}***%{reset} '
'To confirm this, enter %{yellow!}YES%{reset}: ')) != 'YES':
click.secho('Aborted', fg='green')
sys.exit(1)
else:
return func(*args, **kwargs)
def _call_with_plugins(*args, **kwargs):
func = kwargs.pop('_func')
ctx = click.get_current_context()
all_plugins = ctx.parent.params['all_plugins']
plugin = ctx.parent.params['plugin']
if plugin:
plugins = {plugin_engine.get_plugin(plugin)}
elif all_plugins:
plugins = set(plugin_engine.get_active_plugins().values())
else:
plugins = None
if plugins is None:
func(*args, **kwargs)
else:
PluginScriptDirectory.dir = os.path.join(current_app.root_path, 'core', 'plugins', 'alembic')
alembic.command.ScriptDirectory = PluginScriptDirectory
for plugin in plugins:
if not os.path.exists(plugin.alembic_versions_path):
click.secho(f"skipping plugin '{plugin.name}' (no migrations folder)", fg='cyan')
continue
click.secho(f"executing command for plugin '{plugin.name}'", fg='cyan', bold=True)
with plugin.plugin_context():
func(*args, **kwargs)
def METHOD_NAME():
for command in flask_migrate_cli.commands.values():
if command.name == 'init':
continue
command.callback = partial(with_appcontext(_call_with_plugins), _func=command.callback)
if command.name == 'downgrade':
command.callback = partial(with_appcontext(_safe_downgrade), _func=command.callback)
cli.add_command(command)
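# With the commands registered above, the group is typically invoked as, e.g.,
# `indico db upgrade` or `indico db --all-plugins upgrade` (the exact mount point of
# this group is defined elsewhere; shown here only as an illustration).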
METHOD_NAME()
del METHOD_NAME |
299,778 | install | #
# Copyright (c) 2019 Nathaniel Filardo
#
# This software was developed by SRI International and the University of
# Cambridge Computer Laboratory (Department of Computer Science and
# Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the
# DARPA SSITH research programme.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
from .crosscompileproject import CrossCompileProject, DefaultInstallDir, GitRepository, MakeCommandKind
from ...processutils import commandline_to_str
class DLMalloc(CrossCompileProject):
target = "dlmalloc"
repository = GitRepository("https://github.com/CTSRD-CHERI/dlmalloc_nonreuse")
make_kind = MakeCommandKind.GnuMake
native_install_dir = DefaultInstallDir.CHERI_SDK
@classmethod
def setup_config_options(cls, **kwargs):
super().setup_config_options(**kwargs)
cls.just_so = cls.add_bool_option("just-so", help="Just build the .so shim")
cls.debug = cls.add_bool_option("debug", help="Turn on debugging features")
cls.cheri_set_bounds = cls.add_bool_option("cheri-bounds", default=True, help="Set bounds on allocations")
cls.qmabs = cls.add_config_option("qmabs", kind=int,
help="Quarantine memory absolute threshold")
cls.qmratio = cls.add_config_option("qmratio", kind=float,
help="Quarantine memory ratio threshold")
cls.qmmin = cls.add_config_option("qmmin", kind=int,
help="Minimum amount quarantined to trigger a revocation based on ratio")
cls.revoke = cls.add_bool_option("revoke", help="Revoke quarantine before reusing")
cls.consolidate_on_free = cls.add_bool_option("consolidate", default=True,
help="Consolidate memory when quarantining")
cls.zero_memory = cls.add_bool_option("zero-memory", help="Zero allocated memory")
cls.stats_at_exit = cls.add_bool_option("stats-at-exit", default=True, help="print statistics on exit")
cls.unmap_support = cls.add_bool_option("unmap-support", default=True, help="support for unmapping")
cls.unmap_threshold = cls.add_config_option("unmap-threshold", kind=int,
help="Threshold (in pages) at which interior pages of quanantined "
"chunks are unmapped")
cls.quar_unsafe = cls.add_bool_option("unsafe-quarantine",
help="Don't isolate quarantine structures")
def setup(self):
super().setup()
if self.cheri_set_bounds:
self.CFLAGS.append("-DCHERI_SET_BOUNDS")
if self.revoke:
self.CFLAGS.append("-DCAPREVOKE")
if self.qmabs:
self.CFLAGS.append("-DDEFAULT_MAX_FREEBUFBYTES=%d" % self.qmabs)
if self.qmratio:
self.CFLAGS.append("-DDEFAULT_FREEBUF_PERCENT=%f" % self.qmratio)
if self.qmmin:
self.CFLAGS.append("-DDEFAULT_MIN_FREEBUFBYTES=%d" % self.qmmin)
if self.consolidate_on_free:
self.CFLAGS.append("-DCONSOLIDATE_ON_FREE=1")
else:
self.CFLAGS.append("-DCONSOLIDATE_ON_FREE=0")
if self.zero_memory:
self.CFLAGS.append("-DZERO_MEMORY=1")
else:
self.CFLAGS.append("-DZERO_MEMORY=0")
if self.unmap_support:
self.CFLAGS.append("-DSUPPORT_UNMAP=1")
else:
self.CFLAGS.append("-DSUPPORT_UNMAP=0")
if self.unmap_threshold:
self.CFLAGS.append("-DDEFAULT_UNMAP_THRESHOLD=%d" % self.unmap_threshold)
if not self.quar_unsafe:
self.CFLAGS.append("-DSAFE_FREEBUF")
if self.stats_at_exit:
self.CFLAGS.append("-DSWEEP_STATS=1")
self.make_args.add_flags("-f", self.source_dir / "Makefile.cheribuild")
self.make_args.set(DEBUG=self.debug)
self.make_args.set(CAPREVOKE=self.revoke)
self.make_args.set(SRCDIR=self.source_dir)
self.make_args.set_env(CC=self.CC, CFLAGS=commandline_to_str(self.default_compiler_flags + self.CFLAGS))
if not self.compiling_for_host():
self.make_args.set_env(CHERI_SDK=self.target_info.sdk_root_dir)
def compile(self, **kwargs):
if self.just_so:
self.run_make("libdlmalloc_nonreuse.so", cwd=self.build_dir)
else:
self.run_make("all", cwd=self.build_dir)
def METHOD_NAME(*args, **kwargs):
pass |
299,779 | get thermocycler link | """Model for the screen of Labware Setup."""
from rich.console import Console
from selenium.webdriver.chrome.webdriver import WebDriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webelement import WebElement
from automation.driver.base import Base, Element
class LabwareSetup:
"""Labware setup on setup for run."""
def __init__(self, driver: WebDriver, console: Console, execution_id: str) -> None:
"""Initialize with driver."""
self.base: Base = Base(driver, console, execution_id)
self.console: Console = console
labware_setup_text_locator: Element = Element(
(
By.ID,
"CollapsibleStep_Labware Setup",
),
"todo description",
)
securing_labware_to_magnetic_module_link: Element = Element(
(
By.ID,
"ExtraAttentionWarning_magnetic_module_link",
),
"todo description",
)
securing_labware_to_thermocycler_link: Element = Element(
(
By.ID,
"ExtraAttentionWarning_thermocycler_link",
),
"todo description",
)
magnetic_module_modal: Element = Element(
(
By.XPATH,
"//h3[text()='How To Secure Labware to the Magnetic Module']",
),
"todo description",
)
thermocycler_module_modal: Element = Element(
(
By.XPATH,
"//h3[text()='How To Secure Labware to the Thermocycler']",
),
"todo description",
)
close_button: Element = Element(
(
By.XPATH,
'//div[contains(normalize-space(@class), "modals")]//button[text()="close"]',
),
"todo description",
)
proceed_to_run_button: Element = Element(
(By.ID, "LabwareSetup_proceedToRunButton"),
"todo description",
)
start_run_button: Element = Element(
(
By.XPATH,
"//p[text()='Start Run']",
),
"todo description",
)
run_again_button: Element = Element(
(
By.XPATH,
"//p[text()='Run Again']",
),
"todo description",
)
protocol_complete_banner: Element = Element(
(
By.XPATH,
"//span[text()='Protocol run complete']",
),
"todo description",
)
close_protocol_text_locator: Element = Element(
(
By.XPATH,
"//button[text()='close']",
),
"todo description",
)
yes_close_now_text_locator: Element = Element(
(
By.XPATH,
"//button[text()='Yes, close now']",
),
"todo description",
)
def get_labware_setup_text(self) -> WebElement:
"""Locator for labware setup text."""
return self.base.clickable_wrapper(LabwareSetup.labware_setup_text_locator)
def get_magnetic_module_link(self) -> WebElement:
"""Locator for securing labware to magentic module link."""
return self.base.clickable_wrapper(LabwareSetup.securing_labware_to_magnetic_module_link)
def click_magnetic_module_link(self) -> None:
"""Click magnetic module link."""
self.base.click(LabwareSetup.securing_labware_to_magnetic_module_link)
def METHOD_NAME(self) -> WebElement:
"""Locator for securing labware to thermocycler module link."""
return self.base.clickable_wrapper(LabwareSetup.securing_labware_to_thermocycler_link)
def click_thermocycler_module_link(self) -> None:
"""Click thermocycler module link."""
self.base.click(LabwareSetup.securing_labware_to_thermocycler_link)
def get_magnetic_module_modal_text(self) -> WebElement:
"""Locator for magnetic module modal."""
return self.base.clickable_wrapper(LabwareSetup.magnetic_module_modal)
def get_thermocycler_module_modal_text(self) -> WebElement:
"""Locator for thermocycler module modal."""
return self.base.clickable_wrapper(LabwareSetup.thermocycler_module_modal)
def get_close_button(self) -> WebElement:
"""Locator for close button."""
toggle: WebElement = self.base.clickable_wrapper(LabwareSetup.close_button)
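        # Move the pointer to the button so it is scrolled into view before being returned.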
actions = ActionChains(self.base.driver) # type: ignore
actions.move_to_element(toggle).perform() # type: ignore
return toggle
def click_close_button(self) -> None:
"""Click close button."""
toggle: WebElement = self.base.clickable_wrapper(LabwareSetup.close_button)
actions = ActionChains(self.base.driver) # type: ignore
actions.move_to_element(toggle).perform() # type: ignore
self.base.click(LabwareSetup.close_button)
def get_proceed_to_run_button(self) -> WebElement:
"""Locator for proceed to run button."""
scroll: WebElement = self.base.clickable_wrapper(LabwareSetup.proceed_to_run_button)
actions = ActionChains(self.base.driver) # type: ignore
actions.move_to_element(scroll).perform() # type: ignore
return scroll
def click_proceed_to_run_button(self) -> None:
"""Click proceed to run."""
scroll: WebElement = self.base.clickable_wrapper(LabwareSetup.proceed_to_run_button)
actions = ActionChains(self.base.driver) # type: ignore
actions.move_to_element(scroll).perform() # type: ignore
self.base.click(LabwareSetup.proceed_to_run_button)
def get_start_run_button(self) -> WebElement:
"""Locator for start run button."""
return self.base.clickable_wrapper(LabwareSetup.start_run_button)
def click_start_run_button(self) -> None:
"""Click start run."""
self.base.click(LabwareSetup.start_run_button)
def get_run_again_button(self) -> WebElement:
"""Locator for run again button."""
return self.base.clickable_wrapper(LabwareSetup.run_again_button)
def get_protocol_complete_banner(self) -> WebElement:
"""Locator for protocol complete banner."""
return self.base.clickable_wrapper(LabwareSetup.protocol_complete_banner)
def get_protocol_close_button(self) -> WebElement:
"""Locator for protocol close button."""
return self.base.clickable_wrapper(LabwareSetup.close_protocol_text_locator)
def get_confirmation_close_button(self) -> WebElement:
"""Locator for yes close now button."""
return self.base.clickable_wrapper(LabwareSetup.yes_close_now_text_locator)
def click_protocol_close_button(self) -> None:
"""Click protocol close."""
self.base.click(LabwareSetup.close_protocol_text_locator)
def click_confirmation_close_button(self) -> None:
"""Click confirmation close."""
self.base.click(LabwareSetup.yes_close_now_text_locator)
def click_labware_setup_text(self) -> None:
"""Click labware setup text."""
self.base.click(LabwareSetup.labware_setup_text_locator) |
299,780 | update | """gr.ColorPicker() component."""
from __future__ import annotations
from typing import Any, Callable, Literal
from gradio_client.documentation import document, set_documentation_group
from gradio_client.serializing import StringSerializable
from gradio.components.base import IOComponent, _Keywords
from gradio.events import (
Changeable,
Focusable,
Inputable,
Submittable,
)
set_documentation_group("component")
@document()
class ColorPicker(
Changeable, Inputable, Submittable, Focusable, IOComponent, StringSerializable
):
"""
Creates a color picker for user to select a color as string input.
Preprocessing: passes selected color value as a {str} into the function.
Postprocessing: expects a {str} returned from function and sets color picker value to it.
Examples-format: a {str} with a hexadecimal representation of a color, e.g. "#ff0000" for red.
Demos: color_picker, color_generator
"""
def __init__(
self,
value: str | Callable | None = None,
*,
label: str | None = None,
info: str | None = None,
every: float | None = None,
show_label: bool | None = None,
container: bool = True,
scale: int | None = None,
min_width: int = 160,
interactive: bool | None = None,
visible: bool = True,
elem_id: str | None = None,
elem_classes: list[str] | str | None = None,
**kwargs,
):
"""
Parameters:
value: default text to provide in color picker. If callable, the function will be called whenever the app loads to set the initial value of the component.
label: component name in interface.
info: additional component description.
every: If `value` is a callable, run the function 'every' number of seconds while the client connection is open. Has no effect otherwise. Queue must be enabled. The event can be accessed (e.g. to cancel it) via this component's .load_event attribute.
show_label: if True, will display label.
container: If True, will place the component in a container - providing some extra padding around the border.
scale: relative width compared to adjacent Components in a Row. For example, if Component A has scale=2, and Component B has scale=1, A will be twice as wide as B. Should be an integer.
min_width: minimum pixel width, will wrap if not sufficient screen space to satisfy this value. If a certain scale value results in this Component being narrower than min_width, the min_width parameter will be respected first.
interactive: if True, will be rendered as an editable color picker; if False, editing will be disabled. If not provided, this is inferred based on whether the component is used as an input or output.
visible: If False, component will be hidden.
elem_id: An optional string that is assigned as the id of this component in the HTML DOM. Can be used for targeting CSS styles.
elem_classes: An optional list of strings that are assigned as the classes of this component in the HTML DOM. Can be used for targeting CSS styles.
"""
IOComponent.__init__(
self,
label=label,
info=info,
every=every,
show_label=show_label,
container=container,
scale=scale,
min_width=min_width,
interactive=interactive,
visible=visible,
elem_id=elem_id,
elem_classes=elem_classes,
value=value,
**kwargs,
)
def example_inputs(self) -> dict[str, Any]:
return {
"raw": "#000000",
"serialized": "#000000",
}
def get_config(self):
return {
"value": self.value,
**IOComponent.get_config(self),
}
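    # Build an update payload (note the "__type__": "update" marker) that changes the listed properties without recreating the component.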
@staticmethod
def METHOD_NAME(
value: str | Literal[_Keywords.NO_VALUE] | None = _Keywords.NO_VALUE,
label: str | None = None,
info: str | None = None,
show_label: bool | None = None,
container: bool | None = None,
scale: int | None = None,
min_width: int | None = None,
visible: bool | None = None,
interactive: bool | None = None,
):
return {
"value": value,
"label": label,
"info": info,
"show_label": show_label,
"container": container,
"scale": scale,
"min_width": min_width,
"visible": visible,
"interactive": interactive,
"__type__": "update",
}
def preprocess(self, x: str | None) -> str | None:
"""
Any preprocessing needed to be performed on function input.
Parameters:
x: text
Returns:
text
"""
if x is None:
return None
else:
return str(x)
def postprocess(self, y: str | None) -> str | None:
"""
Any postprocessing needed to be performed on function output.
Parameters:
y: text
Returns:
text
"""
if y is None:
return None
else:
return str(y) |
299,781 | moveto | #!/usr/bin/env python3
# This file is part of Shoebot.
# Copyright (C) 2007-2009 the Shoebot authors
# See the COPYING file for the full license text.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Cairo implementation of the canvas."""
import os.path
from math import pi as _pi
from .backend import cairo
from .input_device import InputDeviceMixin
from .canvas import Canvas
from .drawqueue import DrawQueue
class CairoCanvas(Canvas):
"""Cairo implementation of Canvas."""
def __init__(self, sink):
Canvas.__init__(self, sink)
self.size = None
def initial_drawqueue(self):
return DrawQueue()
def initial_transform(self):
"""Return an identity matrix."""
return cairo.Matrix()
def get_input_device(self):
if isinstance(self.sink, InputDeviceMixin):
return self.sink
else:
return None
def reset_drawqueue(self):
self._drawqueue = self.initial_drawqueue()
self._drawqueue.append(self.ctx_render_background)
def reset_transform(self):
self.mode = self.DEFAULT_MODE
self.transform = self.initial_transform()
return self.transform
# Draw stuff
def push_matrix(self):
self.matrix_stack.append(self.transform)
self.transform = cairo.Matrix(*self.transform)
def pop_matrix(self):
self.transform = self.matrix_stack.pop()
def translate(self, xt, yt):
self.transform.translate(xt, yt)
def rotate(self, radians):
self.transform.rotate(radians)
def scale(self, w, h):
self.transform.scale(w, h)
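    # Each *_closure method captures its arguments and returns a callable that performs the matching cairo call on the context it is given, so drawing commands can be recorded and replayed later.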
def moveto_closure(self, x, y):
def METHOD_NAME(ctx):
ctx.move_to(x, y)
return METHOD_NAME
def relmoveto_closure(self, x, y):
def relmoveto(ctx):
ctx.rel_move_to(x, y)
return relmoveto
def lineto_closure(self, x, y):
def lineto(ctx):
ctx.line_to(x, y)
return lineto
def curveto_closure(self, x1, y1, x2, y2, x3, y3):
def curveto(ctx):
ctx.curve_to(x1, y1, x2, y2, x3, y3)
return curveto
def arc_closure(self, x, y, radius, angle1, angle2):
def arc(ctx):
ctx.arc(x, y, radius, angle1, angle2)
return arc
def closepath_closure(self):
def closepath(ctx):
ctx.close_path()
return closepath
def ellipse_closure(self, x, y, w, h):
def ellipse(ctx):
if w != 0.0 and h != 0.0:
ctx.save()
ctx.translate(x + w / 2.0, y + h / 2.0)
ctx.scale(w * 0.5, h * 0.5)
ctx.arc(0.0, 0.0, 1.0, 0.0, 2 * _pi)
ctx.close_path()
ctx.restore()
return ellipse
def relcurveto_closure(self, x1, y1, x2, y2, x3, y3):
def relcurveto(ctx):
ctx.rel_curve_to(x1, y1, x2, y2, x3, y3)
return relcurveto
def rellineto_closure(self, x, y):
def rellineto(ctx):
ctx.rel_line_to(x, y)
return rellineto
def output_closure(self, target):
"""Function to output to a cairo surface.
target is a cairo Context or filename
"""
def output_context(ctx):
target_ctx = target
target_ctx.set_source_surface(ctx.get_target())
target_ctx.paint()
return target_ctx
def output_surface(ctx):
target_ctx = cairo.Context(target)
target_ctx.set_source_surface(ctx.get_target())
target_ctx.paint()
return target_ctx
def output_file(ctx):
root, extension = os.path.splitext(target)
filename = target
extension = extension.lower()
if extension == ".png":
surface = ctx.get_target()
surface.write_to_png(target)
elif extension == ".pdf":
target_ctx = cairo.Context(
cairo.PDFSurface(filename, *self.size_or_default()),
)
target_ctx.set_source_surface(ctx.get_target())
target_ctx.paint()
elif extension in (".ps", ".eps"):
                surface = cairo.PSSurface(filename, *self.size_or_default())
                if extension == ".eps":
                    # EPS output is toggled on the surface; cairo.Context has no set_eps().
                    surface.set_eps(True)
                target_ctx = cairo.Context(surface)
target_ctx.set_source_surface(ctx.get_target())
target_ctx.paint()
elif extension == ".svg":
surface = cairo.SVGSurface(filename, *self.size_or_default())
surface.restrict_to_version(cairo.SVGVersion.VERSION_1_2)
target_ctx = cairo.Context(surface)
target_ctx.set_source_surface(ctx.get_target())
target_ctx.paint()
return filename
if isinstance(target, cairo.Context):
return output_context
elif isinstance(target, cairo.Surface):
return output_surface
else:
return output_file
def ctx_render_background(self, cairo_ctx):
"""Draws the background colour of the bot."""
# TODO - rename this
cairo_ctx.set_source_rgba(*self.background)
cairo_ctx.paint() |
299,782 | pre operations | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"vmss delete",
)
class Delete(AAZCommand):
"""Delete a VM scale set.
"""
_aaz_info = {
"version": "2017-03-30",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.compute/virtualmachinescalesets/{}", "2017-03-30"],
]
}
AZ_SUPPORT_NO_WAIT = True
def _handler(self, command_args):
super()._handler(command_args)
return self.build_lro_poller(self._execute_operations, self._output)
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.vm_scale_set_name = AAZStrArg(
options=["-n", "--name", "--vm-scale-set-name"],
help="The name of the VM scale set.",
required=True,
id_part="name",
)
return cls._args_schema
def _execute_operations(self):
self.METHOD_NAME()
yield self.VirtualMachineScaleSetsDelete(ctx=self.ctx)()
self.post_operations()
@register_callback
def METHOD_NAME(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
return result
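    # Long-running DELETE of the scale set: 200/202 responses are polled via the azure-async-operation final state; a 204 response carries no body, so on_204 is a no-op.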
class VirtualMachineScaleSetsDelete(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [202]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "azure-async-operation"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [200]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_200,
self.on_error,
lro_options={"final-state-via": "azure-async-operation"},
path_format_arguments=self.url_parameters,
)
if session.http_response.status_code in [204]:
return self.client.build_lro_polling(
self.ctx.args.no_wait,
session,
self.on_204,
self.on_error,
lro_options={"final-state-via": "azure-async-operation"},
path_format_arguments=self.url_parameters,
)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachineScaleSets/{vmScaleSetName}",
**self.url_parameters
)
@property
def method(self):
return "DELETE"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
**self.serialize_url_param(
"vmScaleSetName", self.ctx.args.vm_scale_set_name,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2017-03-30",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.end_time = AAZStrType(
serialized_name="endTime",
flags={"read_only": True},
)
_schema_on_200.error = AAZObjectType()
_schema_on_200.name = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.start_time = AAZStrType(
serialized_name="startTime",
flags={"read_only": True},
)
_schema_on_200.status = AAZStrType(
flags={"read_only": True},
)
error = cls._schema_on_200.error
error.code = AAZStrType()
error.details = AAZListType()
error.innererror = AAZObjectType()
error.message = AAZStrType()
error.target = AAZStrType()
details = cls._schema_on_200.error.details
details.Element = AAZObjectType()
_element = cls._schema_on_200.error.details.Element
_element.code = AAZStrType()
_element.message = AAZStrType()
_element.target = AAZStrType()
innererror = cls._schema_on_200.error.innererror
innererror.errordetail = AAZStrType()
innererror.exceptiontype = AAZStrType()
return cls._schema_on_200
def on_204(self, session):
pass
class _DeleteHelper:
"""Helper class for Delete"""
__all__ = ["Delete"] |
299,783 | test call func expect queue | # IMPORTATION STANDARD
import os
# IMPORTATION THIRDPARTY
import pytest
# IMPORTATION INTERNAL
from openbb_terminal.core.session.current_user import (
PreferencesModel,
copy_user,
)
from openbb_terminal.cryptocurrency.tools import tools_controller
# pylint: disable=E1101
# pylint: disable=W0603
# pylint: disable=E1111
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
"queue, expected",
[
(["load", "help"], ["help"]),
(["quit", "help"], ["help"]),
],
)
def test_menu_with_queue(expected, mocker, queue):
path_controller = "openbb_terminal.cryptocurrency.tools.tools_controller"
# MOCK SWITCH
mocker.patch(
target=f"{path_controller}.ToolsController.switch",
return_value=["quit"],
)
result_menu = tools_controller.ToolsController(queue=queue).menu()
assert result_menu == expected
@pytest.mark.vcr(record_mode="none")
def test_menu_without_queue_completion(mocker):
path_controller = "openbb_terminal.cryptocurrency.tools.tools_controller"
# ENABLE AUTO-COMPLETION : HELPER_FUNCS.MENU
preferences = PreferencesModel(USE_PROMPT_TOOLKIT=True)
mock_current_user = copy_user(preferences=preferences)
mocker.patch(
target="openbb_terminal.core.session.current_user.__current_user",
new=mock_current_user,
)
mocker.patch(
target="openbb_terminal.parent_classes.session",
)
mocker.patch(
target="openbb_terminal.parent_classes.session.prompt",
return_value="quit",
)
# DISABLE AUTO-COMPLETION : CONTROLLER.COMPLETER
preferences = PreferencesModel(USE_PROMPT_TOOLKIT=True)
mock_current_user = copy_user(preferences=preferences)
mocker.patch(
target="openbb_terminal.core.session.current_user.__current_user",
new=mock_current_user,
)
mocker.patch(
target=f"{path_controller}.session",
)
mocker.patch(
target=f"{path_controller}.session.prompt",
return_value="quit",
)
result_menu = tools_controller.ToolsController(queue=None).menu()
assert result_menu == ["help"]
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
"mock_input",
["help", "homee help", "home help", "mock"],
)
def test_menu_without_queue_sys_exit(mock_input, mocker):
path_controller = "openbb_terminal.cryptocurrency.tools.tools_controller"
# DISABLE AUTO-COMPLETION
preferences = PreferencesModel(USE_PROMPT_TOOLKIT=True)
mock_current_user = copy_user(preferences=preferences)
mocker.patch(
target="openbb_terminal.core.session.current_user.__current_user",
new=mock_current_user,
)
mocker.patch(
target=f"{path_controller}.session",
return_value=None,
)
# MOCK USER INPUT
mocker.patch("builtins.input", return_value=mock_input)
# MOCK SWITCH
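    # Raise SystemExit only on the first call to switch so the menu loop recovers and then exits cleanly.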
class SystemExitSideEffect:
def __init__(self):
self.first_call = True
def __call__(self, *args, **kwargs):
if self.first_call:
self.first_call = False
raise SystemExit()
return ["quit"]
mock_switch = mocker.Mock(side_effect=SystemExitSideEffect())
mocker.patch(
target=f"{path_controller}.ToolsController.switch",
new=mock_switch,
)
result_menu = tools_controller.ToolsController(queue=None).menu()
assert result_menu == ["help"]
@pytest.mark.vcr(record_mode="none")
@pytest.mark.record_stdout
def test_print_help():
controller = tools_controller.ToolsController(queue=None)
controller.print_help()
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
"an_input, expected_queue",
[
("", []),
("/help", ["home", "help"]),
("help/help", ["help", "help"]),
("q", ["quit"]),
("h", []),
(
"r",
["quit", "quit", "reset", "crypto", "tools"],
),
],
)
def test_switch(an_input, expected_queue):
controller = tools_controller.ToolsController(queue=None)
queue = controller.switch(an_input=an_input)
assert queue == expected_queue
@pytest.mark.vcr(record_mode="none")
def test_call_cls(mocker):
mocker.patch("os.system")
controller = tools_controller.ToolsController(queue=None)
controller.call_cls([])
assert controller.queue == []
os.system.assert_called_once_with("cls||clear")
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
"func, queue, expected_queue",
[
(
"call_exit",
[],
["quit", "quit", "quit"],
),
("call_exit", ["help"], ["quit", "quit", "quit", "help"]),
("call_home", [], ["quit", "quit"]),
("call_help", [], []),
("call_quit", [], ["quit"]),
("call_quit", ["help"], ["quit", "help"]),
(
"call_reset",
[],
["quit", "quit", "reset", "crypto", "tools"],
),
(
"call_reset",
["help"],
["quit", "quit", "reset", "crypto", "tools", "help"],
),
],
)
def METHOD_NAME(expected_queue, func, queue):
controller = tools_controller.ToolsController(queue=queue)
result = getattr(controller, func)([])
assert result is None
assert controller.queue == expected_queue
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
"tested_func, other_args, mocked_func, called_args, called_kwargs",
[
(
"call_aprtoapy",
[],
"tools_view.display_apy",
[],
dict(),
),
],
)
def test_call_func(
tested_func, mocked_func, other_args, called_args, called_kwargs, mocker
):
path_controller = "openbb_terminal.cryptocurrency.tools.tools_controller"
if mocked_func:
mock = mocker.Mock()
mocker.patch(
target=f"{path_controller}.{mocked_func}",
new=mock,
)
controller = tools_controller.ToolsController(queue=None)
getattr(controller, tested_func)(other_args)
if called_args or called_kwargs:
mock.assert_called_once_with(*called_args, **called_kwargs)
else:
mock.assert_called_once()
else:
controller = tools_controller.ToolsController(queue=None)
getattr(controller, tested_func)(other_args) |
299,784 | fund script secp | """SECP256k1 tests for spending with Plutus V2 using `transaction build-raw`."""
import logging
import typing as tp
import allure
import pytest
from _pytest.fixtures import SubRequest
from cardano_clusterlib import clusterlib
from cardano_node_tests.cluster_management import cluster_management
from cardano_node_tests.tests import common
from cardano_node_tests.tests import plutus_common
from cardano_node_tests.tests.tests_plutus_v2 import spend_raw
from cardano_node_tests.utils import clusterlib_utils
from cardano_node_tests.utils import helpers
LOGGER = logging.getLogger(__name__)
pytestmark = [
common.SKIPIF_PLUTUSV2_UNUSABLE,
pytest.mark.smoke,
pytest.mark.plutus,
]
@pytest.fixture
def payment_addrs(
cluster_manager: cluster_management.ClusterManager,
cluster: clusterlib.ClusterLib,
) -> tp.List[clusterlib.AddressRecord]:
"""Create new payment addresses."""
test_id = common.get_test_id(cluster)
addrs = clusterlib_utils.create_payment_addr_records(
*[f"{test_id}_payment_addr_{i}" for i in range(2)],
cluster_obj=cluster,
)
# fund source address
clusterlib_utils.fund_from_faucet(
addrs[0],
cluster_obj=cluster,
faucet_data=cluster_manager.cache.addrs_data["user1"],
amount=3_000_000_000,
)
return addrs
@pytest.mark.testnets
class TestSECP256k1:
@pytest.fixture
def METHOD_NAME(
self,
cluster: clusterlib.ClusterLib,
payment_addrs: tp.List[clusterlib.AddressRecord],
request: SubRequest,
) -> tp.Tuple[str, tp.List[clusterlib.UTXOData], tp.List[clusterlib.UTXOData]]:
"""Fund a Plutus script and create the necessary Tx outputs."""
algorithm = request.param
temp_template = f"{common.get_test_id(cluster)}_{algorithm}"
payment_addr = payment_addrs[0]
dst_addr = payment_addrs[1]
amount = 2_000_000
script_file = (
plutus_common.SECP256K1_LOOP_ECDSA_PLUTUS_V2
if algorithm == "ecdsa"
else plutus_common.SECP256K1_LOOP_SCHNORR_PLUTUS_V2
)
script_address = cluster.g_address.gen_payment_addr(
addr_name=temp_template, payment_script_file=script_file
)
execution_units = (
plutus_common.SECP256K1_ECDSA_LOOP_COST
if algorithm == "ecdsa"
else plutus_common.SECP256K1_SCHNORR_LOOP_COST
)
redeem_cost = plutus_common.compute_cost(
execution_cost=execution_units,
protocol_params=cluster.g_query.get_protocol_params(),
)
tx_files = clusterlib.TxFiles(
signing_key_files=[payment_addr.skey_file],
)
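        # The first output locks the funds at the script address with an inline datum; the second funds the collateral.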
txouts = [
clusterlib.TxOut(
address=script_address,
amount=amount + redeem_cost.fee + spend_raw.FEE_REDEEM_TXSIZE,
inline_datum_file=plutus_common.DATUM_42_TYPED,
),
# for collateral
clusterlib.TxOut(address=dst_addr.address, amount=redeem_cost.collateral),
]
tx_raw_output = cluster.g_transaction.send_tx(
src_address=payment_addr.address,
tx_name=f"{temp_template}_step1",
txouts=txouts,
tx_files=tx_files,
)
txid = cluster.g_transaction.get_txid(tx_body_file=tx_raw_output.out_file)
script_utxos = cluster.g_query.get_utxo(txin=f"{txid}#0")
assert script_utxos, "No script UTxO"
collateral_utxos = cluster.g_query.get_utxo(txin=f"{txid}#1")
assert collateral_utxos, "No collateral UTxO"
return algorithm, script_utxos, collateral_utxos
@allure.link(helpers.get_vcs_link())
@pytest.mark.parametrize("fund_script_secp", ("ecdsa", "schnorr"), indirect=True)
def test_use_secp_builtin_functions(
self,
cluster: clusterlib.ClusterLib,
payment_addrs: tp.List[clusterlib.AddressRecord],
METHOD_NAME: tp.Tuple[str, tp.List[clusterlib.UTXOData], tp.List[clusterlib.UTXOData]],
):
"""Test that it is possible to spend a locked UTxO by a script that uses a SECP function.
* create the necessary Tx outputs
* spend the locked UTxO
* check that script address UTxO was spent
"""
amount = 2_000_000
# create the necessary Tx outputs
algorithm, script_utxos, collateral_utxos = METHOD_NAME
temp_template = f"{common.get_test_id(cluster)}_{algorithm}"
script_file = (
plutus_common.SECP256K1_LOOP_ECDSA_PLUTUS_V2
if algorithm == "ecdsa"
else plutus_common.SECP256K1_LOOP_SCHNORR_PLUTUS_V2
)
redeemer_dir = (
plutus_common.SEPC256K1_ECDSA_DIR
if algorithm == "ecdsa"
else plutus_common.SEPC256K1_SCHNORR_DIR
)
redeemer_file = redeemer_dir / "loop_script.redeemer"
execution_units = (
plutus_common.SECP256K1_ECDSA_LOOP_COST
if algorithm == "ecdsa"
else plutus_common.SECP256K1_SCHNORR_LOOP_COST
)
plutus_op = plutus_common.PlutusOp(
script_file=script_file,
datum_file=plutus_common.DATUM_42_TYPED,
redeemer_file=redeemer_file,
execution_cost=execution_units,
)
# for mypy
assert plutus_op.script_file
assert plutus_op.redeemer_file
assert plutus_op.execution_cost
# spend the "locked" UTxO
plutus_txins = [
clusterlib.ScriptTxIn(
txins=script_utxos,
script_file=plutus_op.script_file,
collaterals=collateral_utxos,
redeemer_file=plutus_op.redeemer_file,
inline_datum_present=True,
execution_units=(
plutus_op.execution_cost.per_time,
plutus_op.execution_cost.per_space,
),
)
]
tx_files_redeem = clusterlib.TxFiles(
signing_key_files=[payment_addrs[1].skey_file],
)
txouts_redeem = [
clusterlib.TxOut(address=payment_addrs[0].address, amount=amount),
]
try:
cluster.g_transaction.send_tx(
src_address=payment_addrs[0].address,
tx_name=f"{temp_template}_step1",
txouts=txouts_redeem,
tx_files=tx_files_redeem,
script_txins=plutus_txins,
fee=300_000,
)
except clusterlib.CLIError as err:
plutus_common.xfail_on_secp_error(
cluster_obj=cluster, algorithm=algorithm, err_msg=str(err)
)
raise
# check that script address UTxO was spent
assert not cluster.g_query.get_utxo(
utxo=script_utxos[0]
), f"Script address UTxO was NOT spent `{script_utxos}`" |
299,785 | resolve submission path | """Autograder runner for R assignments"""
import copy
import nbformat as nbf
import os
import re
import tempfile
import yaml
from glob import glob
from nbconvert.exporters import ScriptExporter
from rpy2.robjects.packages import importr
from .abstract_runner import AbstractLanguageRunner
from ..utils import OtterRuntimeError
from ....export import export_notebook
from ....generate.token import APIClient
from ....test_files import GradingResults
from ....utils import chdir, get_source, knit_rmd_file, NBFORMAT_VERSION
R_PACKAGES = {
"knitr": importr("knitr"),
"ottr": importr("ottr"),
}
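# Captures the YAML front matter between the leading "---" delimiters at the top of an Rmd file.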
RMD_YAML_REGEX = r"^\n*---\n([\s\S]+?)\n---"
class RRunner(AbstractLanguageRunner):
subm_path_deletion_required = False
"""whether the submission path needs to be deleted (because it was created with tempfile)"""
def validate_submission(self, submission_path):
assignment_name = False
ext = os.path.splitext(submission_path)[1].lower()
if ext == ".ipynb":
nb = nbf.read(submission_path, as_version=nbf.NO_CONVERT)
assignment_name = self.get_notebook_assignment_name(nb)
elif ext == ".rmd":
assignment_name = None
with open(submission_path) as f:
rmd = f.read()
config = re.match(RMD_YAML_REGEX, rmd)
if config:
config = config.group(1)
assignment_name = yaml.full_load(config).get("assignment_name", None)
if assignment_name is not False:
self.validate_assignment_name(assignment_name)
def filter_cells_with_syntax_errors(self, nb):
"""
Filter out cells in an R notebook with syntax errors.
"""
new_cells = []
for cell in nb["cells"]:
if cell["cell_type"] == "code":
source = "\n".join(get_source(cell))
valid_syntax = R_PACKAGES["ottr"].valid_syntax(source)[0]
if valid_syntax:
new_cells.append(cell)
nb = copy.deepcopy(nb)
nb["cells"] = new_cells
return nb
def add_seeds_to_rmd_file(self, rmd_path):
"""
Add intercell seeding to an Rmd file.
"""
with open(rmd_path) as f:
rmd = f.read()
lines = rmd.split("\n")
insertions = []
for i, line in enumerate(lines):
if line.startswith("```{r"):
insertions.append(i)
seed = f"set.seed({self.ag_config.seed})"
if self.ag_config.seed_variable:
seed = f"{self.ag_config.seed_variable} = {self.ag_config.seed}"
for i in insertions[::-1]:
lines.insert(i + 1, seed)
with open(rmd_path, "w") as f:
f.write("\n".join(lines))
def add_seed_to_script(self, script_path):
"""
Add a line calling ``set.seed`` to the top of the R script at the specified path.
"""
with open(script_path) as f:
script = f.read()
script = f"set.seed({self.ag_config.seed})\n" + script
with open(script_path, "w") as f:
f.write(script)
def METHOD_NAME(self):
# create a temporary file at which to write a script if necessary
_, script_path = tempfile.mkstemp(suffix=".R")
# convert IPYNB files to Rmd files
nbs = glob("*.ipynb")
if len(nbs) > 1:
raise OtterRuntimeError("More than one IPYNB file found in submission")
elif len(nbs) == 1:
nb_path = nbs[0]
self.validate_submission(nb_path)
nb = nbf.read(nb_path, as_version=NBFORMAT_VERSION)
nb = self.filter_cells_with_syntax_errors(nb)
# create the R script
script, _ = ScriptExporter().from_notebook_node(nb)
with open(script_path, "w") as f:
f.write(script)
self.subm_path_deletion_required = True
return script_path
# convert Rmd files to R files
rmds = glob("*.Rmd")
if len(rmds) > 1:
raise OtterRuntimeError("More than one Rmd file found in submission")
elif len(rmds) == 1:
rmd_path = rmds[0]
self.validate_submission(rmd_path)
# add seeds
if self.ag_config.seed is not None:
self.add_seeds_to_rmd_file(rmd_path)
# create the R script
rmd_path = os.path.abspath(rmd_path)
R_PACKAGES["knitr"].purl(rmd_path, script_path)
self.subm_path_deletion_required = True
return script_path
os.remove(script_path)
# get the R script
scripts = glob("*.[Rr]")
if len(scripts) > 1:
raise OtterRuntimeError("More than one R script found in submission")
elif len(scripts) == 0:
raise OtterRuntimeError("No gradable files found in submission")
if self.ag_config.seed is not None:
self.add_seed_to_script(scripts[0])
return scripts[0]
def write_pdf(self, _):
# NOTE: this method ignores the submission_path argument, and instead resolves it again
# manually
# TODO: de-deduplicate this path resolution logic with resolve_submission_path
nbs = glob("*.ipynb")
if nbs:
subm_path = nbs[0]
ipynb = True
else:
rmds = glob("*.Rmd")
if rmds:
subm_path = rmds[0]
ipynb = False
else:
raise OtterRuntimeError("Could not find a file that can be converted to a PDF")
pdf_path = os.path.splitext(subm_path)[0] + ".pdf"
if ipynb:
export_notebook(
subm_path, dest=pdf_path, filtering=self.ag_config.filtering,
pagebreaks=self.ag_config.pagebreaks, exporter_type="latex")
else:
knit_rmd_file(subm_path, pdf_path)
return pdf_path
def run(self):
os.environ["PATH"] = f"{self.ag_config.miniconda_path}/bin:" + os.environ.get("PATH")
with chdir("./submission"):
if self.ag_config.token is not None:
client = APIClient(token=self.ag_config.token)
generate_pdf = True
has_token = True
else:
generate_pdf = self.ag_config.pdf
has_token = False
client = None
subm_path = self.METHOD_NAME()
output = R_PACKAGES["ottr"].run_autograder(
                subm_path, ignore_errors=not self.ag_config.debug, test_dir="./tests")[0]
scores = GradingResults.from_ottr_json(output)
if generate_pdf:
self.write_and_maybe_submit_pdf(client, None, has_token, scores)
# delete the script if necessary
if self.subm_path_deletion_required:
os.remove(subm_path)
self.subm_path_deletion_required = False
return scores |
299,786 | transform | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converter for logical expressions, e.g. `a and b -> tf.logical_and(a, b)`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import templates
# TODO(mdan): Properly extract boolean ops according to lazy eval rules.
# Note that this isn't completely safe either, because tensors may have control
# dependencies.
# Note that for loops this should be done after the loop was converted to
# tf.while_loop so that the expanded conditionals are properly scoped.
# Used to signal that an operand is safe for non-lazy evaluation.
SAFE_BOOLEAN_OPERAND = 'SAFE_BOOLEAN_OPERAND'
LOGICAL_OPERATORS = {
gast.And: 'ag__.and_',
gast.Not: 'ag__.not_',
gast.Or: 'ag__.or_',
}
EQUALITY_OPERATORS = {
gast.Eq: 'ag__.eq',
gast.NotEq: 'ag__.not_eq',
}
class LogicalExpressionTransformer(converter.Base):
"""Converts logical expressions to corresponding TF calls."""
def _overload_of(self, operator):
op_type = type(operator)
if op_type in LOGICAL_OPERATORS:
return LOGICAL_OPERATORS[op_type]
if self.ctx.program.options.uses(converter.Feature.EQUALITY_OPERATORS):
if op_type in EQUALITY_OPERATORS:
return EQUALITY_OPERATORS[op_type]
return None
def _as_lambda(self, expr):
return templates.replace_as_expression('lambda: expr', expr=expr)
def _as_binary_function(self, func_name, arg1, arg2):
return templates.replace_as_expression(
'func_name(arg1, arg2)',
func_name=parser.parse_expression(func_name),
arg1=arg1,
arg2=arg2)
def _as_binary_operation(self, op, arg1, arg2):
template = templates.replace_as_expression(
'arg1 is arg2',
arg1=arg1,
arg2=arg2)
template.ops[0] = op
return template
def _as_unary_function(self, func_name, arg):
return templates.replace_as_expression(
'func_name(arg)', func_name=parser.parse_expression(func_name), arg=arg)
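  # Equality operators are overloaded only when the EQUALITY_OPERATORS feature is enabled; other comparison operators keep their native form via _as_binary_operation.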
def visit_Compare(self, node):
node = self.generic_visit(node)
if (not self.ctx.program.options.uses(
converter.Feature.EQUALITY_OPERATORS)):
return node
ops_and_comps = list(zip(node.ops, node.comparators))
left = node.left
# Repeated comparisons are converted to conjunctions:
# a < b < c -> a < b and b < c
op_tree = None
while ops_and_comps:
op, right = ops_and_comps.pop(0)
overload = self._overload_of(op)
if overload is not None:
binary_comparison = self._as_binary_function(overload, left, right)
else:
binary_comparison = self._as_binary_operation(op, left, right)
if op_tree is not None:
op_tree = self._as_binary_function('ag__.and_',
self._as_lambda(op_tree),
self._as_lambda(binary_comparison))
else:
op_tree = binary_comparison
left = right
assert op_tree is not None
return op_tree
def visit_UnaryOp(self, node):
node = self.generic_visit(node)
overload = self._overload_of(node.op)
if overload is None:
return node
return self._as_unary_function(overload, node.operand)
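  # Fold "a and b and c" from the right into nested ag__ calls, wrapping each operand in a lambda to preserve lazy evaluation.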
def visit_BoolOp(self, node):
node = self.generic_visit(node)
node_values = node.values
right = node.values.pop()
while node_values:
left = node_values.pop()
right = self._as_binary_function(
self._overload_of(node.op), self._as_lambda(left),
self._as_lambda(right))
return right
def METHOD_NAME(node, ctx):
transformer = LogicalExpressionTransformer(ctx)
return transformer.visit(node) |
299,787 | pub year | import hashlib
import logging
from gettext import gettext as _
from lxml import etree
LOGGER = logging.getLogger(__name__)
LOGGER_FMT = "%(asctime)s [%(levelname)s] %(name)s: %(message)s"
class PidProviderXMLAdapter:
def __init__(self, xml_with_pre, pkg_name=None):
self.xml_with_pre = xml_with_pre
self.pkg_name = pkg_name
def tostring(self):
return self.xml_with_pre.tostring()
@property
def sps_pkg_name(self):
return self.xml_with_pre.sps_pkg_name
@property
def finger_print(self):
return self.xml_with_pre.finger_print
@property
def related_items(self):
return self.xml_with_pre.related_items
@property
def journal_issn_electronic(self):
return self.xml_with_pre.journal_issn_electronic
@property
def journal_issn_print(self):
return self.xml_with_pre.journal_issn_print
@property
def v2_prefix(self):
return self.xml_with_pre.v2_prefix
@property
def volume(self):
return self.xml_with_pre.volume
@property
def number(self):
return self.xml_with_pre.number
@property
def suppl(self):
return self.xml_with_pre.suppl
@property
def METHOD_NAME(self):
return self.xml_with_pre.METHOD_NAME
@property
def article_pub_year(self):
return self.xml_with_pre.article_pub_year
@property
def main_doi(self):
return self.xml_with_pre.main_doi
@property
def main_toc_section(self):
return self.xml_with_pre.main_toc_section
@property
def is_aop(self):
return self.xml_with_pre.is_aop
@property
def elocation_id(self):
return self.xml_with_pre.elocation_id
@property
def fpage(self):
return self.xml_with_pre.fpage
@property
def fpage_seq(self):
return self.xml_with_pre.fpage_seq
@property
def lpage(self):
return self.xml_with_pre.lpage
@property
def v2(self):
return self.xml_with_pre.v2
@v2.setter
def v2(self, value):
self.xml_with_pre.v2 = value
@property
def v3(self):
return self.xml_with_pre.v3
@v3.setter
def v3(self, value):
self.xml_with_pre.v3 = value
@property
def aop_pid(self):
return self.xml_with_pre.aop_pid
@aop_pid.setter
def aop_pid(self, value):
self.xml_with_pre.aop_pid = value
@property
def z_links(self):
if not hasattr(self, "_links") or not self._links:
self._links = _str_with_64_char("|".join(self.xml_with_pre.links))
return self._links
@property
def z_collab(self):
if not hasattr(self, "_collab") or not self._collab:
self._collab = _str_with_64_char(self.xml_with_pre.collab)
return self._collab
@property
def z_surnames(self):
if not hasattr(self, "_surnames") or not self._surnames:
self._surnames = _str_with_64_char(
"|".join(
[
_standardize(person.get("surname"))
for person in self.xml_with_pre.authors.get("person")
]
)
)
return self._surnames
@property
def z_article_titles_texts(self):
return _str_with_64_char(
"|".join(sorted(self.xml_with_pre.article_titles_texts or []))
)
@property
def z_partial_body(self):
if not hasattr(self, "_partial_body") or not self._partial_body:
self._partial_body = _str_with_64_char(self.xml_with_pre.partial_body)
return self._partial_body
def query_params(self, filter_by_issue=False, aop_version=False):
"""
Get query parameters
Arguments
---------
filter_by_issue: bool
aop_version: bool
Returns
-------
dict
"""
_params = dict(
z_surnames=self.z_surnames or None,
z_collab=self.z_collab or None,
)
if not any(_params.values()):
_params["main_doi"] = self.main_doi
if not any(_params.values()):
_params["z_links"] = self.z_links
if not any(_params.values()):
_params["pkg_name"] = self.sps_pkg_name
if not any(_params.values()):
_params["z_partial_body"] = self.z_partial_body
_params["elocation_id"] = self.elocation_id
if aop_version:
_params["issue__isnull"] = True
else:
if filter_by_issue:
_params["issue__pub_year"] = self.METHOD_NAME
_params["issue__volume"] = self.volume
_params["issue__number"] = self.number
_params["issue__suppl"] = self.suppl
_params["fpage"] = self.fpage
_params["fpage_seq"] = self.fpage_seq
_params["lpage"] = self.lpage
_params["journal__issn_print"] = self.journal_issn_print
_params["journal__issn_electronic"] = self.journal_issn_electronic
_params["article_pub_year"] = self.article_pub_year
_params["z_article_titles_texts"] = self.z_article_titles_texts
LOGGER.info(_params)
return _params
@classmethod
def adapt_query_params(cls, params):
"""
Adapt query parameters
Parameters
----------
params : dict
Returns
-------
dict
"""
_params = params.copy()
LOGGER.info(f"Adapt params input: {_params}")
attr_names = (
"main_doi",
"pkg_name",
"elocation_id",
"issue__volume",
"issue__number",
"issue__suppl",
"fpage",
"fpage_seq",
"lpage",
)
for attr_name in attr_names:
try:
_params[f"{attr_name}__iexact"] = _params.pop(attr_name)
except KeyError:
continue
LOGGER.info(f"Adapt params output: {_params}")
return _params
@property
def query_list(self):
items = []
if self.is_aop:
LOGGER.info("self.is_aop")
            # the xml_adapter has no issue data,
            # so the query does not constrain the issue attribute
            # and the matching record may or may not carry issue data
params = self.query_params(aop_version=False)
items.append(params)
else:
            # the xml_adapter has issue data,
            # so include the issue data in the query
LOGGER.info("not self.is_aop")
params = self.query_params(filter_by_issue=True)
items.append(params)
            # also look for a record whose issue value is None (the AOP version)
params = self.query_params(aop_version=True)
items.append(params)
return items
def _standardize(text):
return (text or "").strip().upper()
def _str_with_64_char(text):
"""
>>> import hashlib
>>> m = hashlib.sha256()
>>> m.update(b"Nobody inspects")
>>> m.update(b" the spammish repetition")
>>> m.digest()
b'\x03\x1e\xdd}Ae\x15\x93\xc5\xfe\\\x00o\xa5u+7\xfd\xdf\xf7\xbcN\x84:\xa6\xaf\x0c\x95\x0fK\x94\x06'
>>> m.digest_size
32
>>> m.block_size
64
hashlib.sha224(b"Nobody inspects the spammish repetition").hexdigest()
"""
if not text:
return None
return hashlib.sha256(_standardize(text).encode("utf-8")).hexdigest() |
299,788 | safe extra | """
Helpers for normalization as expected in wheel/sdist/module file names
and core metadata
"""
import re
from pathlib import Path
from typing import Union
from .extern import packaging
from .warnings import SetuptoolsDeprecationWarning
_Path = Union[str, Path]
# https://packaging.python.org/en/latest/specifications/core-metadata/#name
_VALID_NAME = re.compile(r"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$", re.I)
_UNSAFE_NAME_CHARS = re.compile(r"[^A-Z0-9.]+", re.I)
_NON_ALPHANUMERIC = re.compile(r"[^A-Z0-9]+", re.I)
def safe_identifier(name: str) -> str:
"""Make a string safe to be used as Python identifier.
>>> safe_identifier("12abc")
'_12abc'
>>> safe_identifier("__editable__.myns.pkg-78.9.3_local")
'__editable___myns_pkg_78_9_3_local'
"""
safe = re.sub(r'\W|^(?=\d)', '_', name)
assert safe.isidentifier()
return safe
def safe_name(component: str) -> str:
"""Escape a component used as a project name according to Core Metadata.
>>> safe_name("hello world")
'hello-world'
>>> safe_name("hello?world")
'hello-world'
"""
# See pkg_resources.safe_name
return _UNSAFE_NAME_CHARS.sub("-", component)
def safe_version(version: str) -> str:
"""Convert an arbitrary string into a valid version string.
>>> safe_version("1988 12 25")
'1988.12.25'
>>> safe_version("v0.2.1")
'0.2.1'
>>> safe_version("v0.2?beta")
'0.2b0'
>>> safe_version("v0.2 beta")
'0.2b0'
>>> safe_version("ubuntu lts")
Traceback (most recent call last):
...
setuptools.extern.packaging.version.InvalidVersion: Invalid version: 'ubuntu.lts'
"""
v = version.replace(' ', '.')
try:
return str(packaging.version.Version(v))
except packaging.version.InvalidVersion:
attempt = _UNSAFE_NAME_CHARS.sub("-", v)
return str(packaging.version.Version(attempt))
def best_effort_version(version: str) -> str:
"""Convert an arbitrary string into a version-like string.
>>> best_effort_version("v0.2 beta")
'0.2b0'
>>> import warnings
>>> warnings.simplefilter("ignore", category=SetuptoolsDeprecationWarning)
>>> best_effort_version("ubuntu lts")
'ubuntu.lts'
"""
# See pkg_resources.safe_version
try:
return safe_version(version)
except packaging.version.InvalidVersion:
SetuptoolsDeprecationWarning.emit(
f"Invalid version: {version!r}.",
f"""
Version {version!r} is not valid according to PEP 440.
Please make sure to specify a valid version for your package.
Also note that future releases of setuptools may halt the build process
if an invalid version is given.
""",
see_url="https://peps.python.org/pep-0440/",
due_date=(2023, 9, 26), # See setuptools/dist _validate_version
)
v = version.replace(' ', '.')
return safe_name(v)
def METHOD_NAME(extra: str) -> str:
"""Normalize extra name according to PEP 685
>>> safe_extra("_FrIeNdLy-._.-bArD")
'friendly-bard'
>>> safe_extra("FrIeNdLy-._.-bArD__._-")
'friendly-bard'
"""
return _NON_ALPHANUMERIC.sub("-", extra).strip("-").lower()
def filename_component(value: str) -> str:
"""Normalize each component of a filename (e.g. distribution/version part of wheel)
Note: ``value`` needs to be already normalized.
>>> filename_component("my-pkg")
'my_pkg'
"""
return value.replace("-", "_").strip("_")
def safer_name(value: str) -> str:
"""Like ``safe_name`` but can be used as filename component for wheel"""
# See bdist_wheel.safer_name
return filename_component(safe_name(value))
def safer_best_effort_version(value: str) -> str:
"""Like ``best_effort_version`` but can be used as filename component for wheel"""
# See bdist_wheel.safer_verion
# TODO: Replace with only safe_version in the future (no need for best effort)
return filename_component(best_effort_version(value)) |
299,789 | test schedule jobs on topic inactive | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015-2016 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from mock import patch
def test_schedule_jobs(remoteci_context, topic):
headers = {
"User-Agent": "python-dciclient",
"Client-Version": "python-dciclient_0.1.0",
}
data = {"topic_id": topic["id"]}
r = remoteci_context.post("/api/v1/jobs/schedule", headers=headers, data=data)
assert r.status_code == 201
job = r.data["job"]
assert job["topic_id"] == topic["id"]
assert job["user_agent"] == headers["User-Agent"]
assert job["client_version"] == headers["Client-Version"]
def test_schedule_jobs_with_components_ids(user, remoteci_context, topic):
components = user.get("/api/v1/topics/%s/components" % topic["id"]).data[
"components"
]
data = {"topic_id": topic["id"], "components_ids": [components[0]["id"]]}
r = remoteci_context.post("/api/v1/jobs/schedule", data=data)
assert r.status_code == 201
def test_schedule_jobs_with_previous_job_id(remoteci_context, topic):
r = remoteci_context.post("/api/v1/jobs/schedule", data={"topic_id": topic["id"]})
assert r.status_code == 201
job1 = r.data["job"]
assert job1["topic_id"] == topic["id"]
r = remoteci_context.post(
"/api/v1/jobs/schedule",
data={"topic_id": topic["id"], "previous_job_id": job1["id"]},
)
assert r.status_code == 201
job2 = r.data["job"]
assert job2["topic_id"] == topic["id"]
assert job2["previous_job_id"] == job1["id"]
def _update_remoteci(admin, id, etag, data):
url = "/api/v1/remotecis/%s" % id
r = admin.put(url, headers={"If-match": etag}, data=data)
assert r.status_code == 200
return admin.get(url).data["remoteci"]
def test_schedule_jobs_on_remoteci_inactive(admin, remoteci_context, topic):
remoteci = remoteci_context.get("/api/v1/identity").data["identity"]
remoteci["etag"] = admin.get("/api/v1/remotecis/%s" % remoteci["id"]).data[
"remoteci"
]["etag"]
remoteci = _update_remoteci(
admin, remoteci["id"], remoteci["etag"], {"state": "inactive"}
)
data = {"topic_id": topic["id"]}
r = remoteci_context.post("/api/v1/jobs/schedule", data=data)
assert r.status_code != 201
remoteci = _update_remoteci(
admin, remoteci["id"], remoteci["etag"], {"state": "active"}
)
data = {"topic_id": topic["id"]}
r = remoteci_context.post("/api/v1/jobs/schedule", data=data)
assert r.status_code == 201
def test_schedule_jobs_on_remoteci_team_inactive(
admin, remoteci_context, topic, team_user_id
):
team_etag = admin.get("/api/v1/teams/%s" % team_user_id).data["team"]["etag"]
r = admin.put(
"/api/v1/teams/%s" % team_user_id,
headers={"If-match": team_etag},
data={"state": "inactive"},
)
assert r.status_code == 200
data = {"topic_id": topic["id"]}
r = remoteci_context.post("/api/v1/jobs/schedule", data=data)
assert r.status_code == 412
team_etag = admin.get("/api/v1/teams/%s" % team_user_id).data["team"]["etag"]
r = admin.put(
"/api/v1/teams/%s" % team_user_id,
headers={"If-match": team_etag},
data={"state": "active"},
)
assert r.status_code == 200
data = {"topic_id": topic["id"]}
r = remoteci_context.post("/api/v1/jobs/schedule", data=data)
assert r.status_code == 201
def _update_topic(admin, topic, data):
url = "/api/v1/topics/%s" % topic["id"]
r = admin.put(url, headers={"If-match": topic["etag"]}, data=data)
assert r.status_code == 200
return admin.get(url).data["topic"]
def METHOD_NAME(admin, remoteci_context, topic, team_user_id):
admin.post("/api/v1/topics/%s/teams" % topic["id"], data={"team_id": team_user_id})
topic = _update_topic(admin, topic, {"state": "inactive"})
data = {"topic_id": topic["id"]}
r = remoteci_context.post("/api/v1/jobs/schedule", data=data)
assert r.status_code == 412
topic = _update_topic(admin, topic, {"state": "active"})
data = {"topic_id": topic["id"]}
r = remoteci_context.post("/api/v1/jobs/schedule", data=data)
assert r.status_code == 201
def test_schedule_jobs_kills_jobs_older_than_one_day(admin, remoteci_context, topic):
data = {"topic_id": topic["id"]}
fixed_now = datetime.datetime(2019, 1, 12, 13, 42, 20, 111136)
with patch("dci.api.v1.jobs.get_utc_now", return_value=fixed_now):
r = remoteci_context.post("/api/v1/jobs/schedule", data=data)
assert r.status_code == 201
r = remoteci_context.post("/api/v1/jobs/schedule", data=data)
assert r.status_code == 201
r = remoteci_context.post("/api/v1/jobs/schedule", data=data)
assert r.status_code == 201
jobs = admin.get("/api/v1/jobs?sort=-created_at").data["jobs"]
assert jobs[-1]["status"] == "killed"
assert jobs[-2]["status"] == "new"
assert jobs[-3]["status"] == "new" |
299,790 | get composite shader | # Copyright (c) 2018 Ultimaker B.V.
# Uranium is released under the terms of the LGPLv3 or higher.
from UM.Application import Application
from UM.Resources import Resources
from UM.Math.Color import Color
from UM.View.RenderPass import RenderPass
from UM.View.GL.OpenGL import OpenGL
from typing import List
MYPY = False
if MYPY:
from UM.View.GL.ShaderProgram import ShaderProgram
class CompositePass(RenderPass):
"""A RenderPass subclass providing the final composition render.
This render pass uses the other render passes to render a final composited image.
By default, this consists of the output of the default pass, with an outline
rendered on top of it using a convolution filter.
You can use setCompositeShader() to override the shader used for the composition.
Additionally, setLayerBindings() can be used to set layer bindings, that is set,
which layer is bound to which texture unit.
:note The CompositePass should always be last in the Renderer's rendering order.
Therefore, when subclassing RenderPass make sure to use a priority lower than
RenderPass.MaximumPriority.
"""
def __init__(self, width, height):
super().__init__("composite", width, height, RenderPass.MaximumPriority)
self._shader = OpenGL.getInstance().createShaderProgram(Resources.getPath(Resources.Shaders, "composite.shader"))
theme = Application.getInstance().getTheme()
self._shader.setUniformValue("u_background_color", Color(*theme.getColor("viewport_background").getRgb()))
self._shader.setUniformValue("u_outline_color", Color(*theme.getColor("model_selection_outline").getRgb()))
self._gl = OpenGL.getInstance().getBindingsObject()
self._renderer = Application.getInstance().getRenderer()
self._layer_bindings = ["default", "selection"]
def METHOD_NAME(self) -> "ShaderProgram":
"""Get the shader currently used for compositing."""
return self._shader
def setCompositeShader(self, shader: "ShaderProgram") -> None:
"""Set the shader to use for compositing."""
self._shader = shader
def getLayerBindings(self) -> List[str]:
"""Get the current layer bindings."""
return self._layer_bindings
def setLayerBindings(self, bindings: List[str]) -> None:
"""Set the layer bindings to use.
This should be a list of RenderPass names. The passes will be bound
to different texture units in the order specified. By default, the output
of the "default" RenderPass is bound to texture unit 0 and the output of
the "selection" RenderPass is bound to texture unit 1.
:param bindings: The list of layer bindings to use.
"""
self._layer_bindings = bindings
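    # Hedged usage sketch (the "overlay" pass name is illustrative, not part of
    # this module): a view or plugin can rebind the composited layers, e.g.
    #   composite_pass = Application.getInstance().getRenderer().getRenderPass("composite")
    #   composite_pass.setLayerBindings(["default", "selection", "overlay"])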
def render(self) -> None:
"""Perform the actual rendering of the render pass."""
self._shader.bind()
outline_size = 2.0
step_x = outline_size / self._width
step_y = outline_size / self._height
offset = [
[-step_x, -step_y], [0.0, -step_y], [step_x, -step_y],
[-step_x, 0.0], [0.0, 0.0], [step_x, 0.0],
[-step_x, step_y], [0.0, step_y], [step_x, step_y]
]
self._shader.setUniformValue("u_offset", offset)
texture_unit = 0
for binding in self._layer_bindings:
render_pass = self._renderer.getRenderPass(binding)
if not render_pass:
continue
self._gl.glActiveTexture(getattr(self._gl, "GL_TEXTURE{0}".format(texture_unit)))
self._gl.glBindTexture(self._gl.GL_TEXTURE_2D, render_pass.getTextureId())
texture_unit += 1
self._renderer.renderFullScreenQuad(self._shader)
for i in range(texture_unit):
self._gl.glActiveTexture(getattr(self._gl, "GL_TEXTURE{0}".format(i)))
self._gl.glBindTexture(self._gl.GL_TEXTURE_2D, 0)
self._shader.release()
self._gl.glActiveTexture(self._gl.GL_TEXTURE0) |
299,791 | test set text | # Copyright 2012,2014 Christoph Reiter
# 2016 Nick Boultbee
# 2019 Ruud van Asseldonk
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from quodlibet.browsers.covergrid.main import CoverGrid
from senf import fsnative
from . import TestCase, run_gtk_loop
from .helper import realized
from quodlibet import config
from quodlibet.browsers.albums.prefs import DEFAULT_PATTERN_TEXT
from quodlibet.formats import AudioFile
from quodlibet.library import SongLibrary, SongLibrarian
SONGS = [
AudioFile({
"album": "one",
"artist": "piman",
"~filename": fsnative(u"/dev/null"),
}),
AudioFile({
"album": "two",
"artist": "mu",
"~filename": fsnative(u"/dev/zero"),
}),
AudioFile({
"album": "three",
"artist": "boris",
"~filename": fsnative(u"/bin/ls"),
}),
AudioFile({
"album": "three",
"artist": "boris",
"~filename": fsnative(u"/bin/ls2"),
}),
]
SONGS.sort()
class TCoverGridBrowser(TestCase):
def setUp(self):
config.init()
library = SongLibrary()
library.librarian = SongLibrarian()
CoverGrid.init(library)
for af in SONGS:
af.sanitize()
library.add(SONGS)
self.bar = CoverGrid(library)
self._id = self.bar.connect("songs-selected", self._selected)
self._id2 = self.bar.connect("songs-activated", self._activated)
with realized(self.bar):
self.bar.filter_text("")
self._wait()
self.songs = []
self.activated = False
def tearDown(self):
self.bar.disconnect(self._id)
self.bar.disconnect(self._id2)
self.bar.destroy()
del self.bar
config.quit()
def _activated(self, albumlist):
self.activated = True
def _selected(self, albumlist, songs, *args):
self.songs = songs
def _wait(self):
run_gtk_loop()
def test_activated(self):
with realized(self.bar):
view = self.bar.view
child = view.get_child_at_index(0)
child.emit('activate')
self._wait()
self.failUnless(self.activated)
def test_can_filter(self):
with realized(self.bar):
self.failUnless(self.bar.can_filter(None))
self.failUnless(self.bar.can_filter("album"))
self.failUnless(self.bar.can_filter("foobar"))
self.failIf(self.bar.can_filter("~#length"))
self.failIf(self.bar.can_filter("title"))
def METHOD_NAME(self):
with realized(self.bar):
self.bar.filter_text("artist=piman")
self._wait()
self.failUnlessEqual(len(self.songs), 1)
self.bar.filter_text("")
self._wait()
self.failUnlessEqual(set(self.songs), set(SONGS))
def test_filter_album(self):
with realized(self.bar):
self.bar.filter_text("dsagfsag")
self._wait()
self.failUnlessEqual(len(self.songs), 0)
self.bar.filter_text("")
self._wait()
self.bar.filter("album", ["one", "three"])
self._wait()
self.failUnlessEqual(len(self.songs), 3)
def test_filter_artist(self):
with realized(self.bar):
self.bar.filter("artist", ["piman"])
self._wait()
self.failUnlessEqual(len(self.songs), 1)
self.failUnlessEqual(self.songs[0]("artist"), "piman")
def test_header(self):
self.failIf(self.bar.headers)
def test_list(self):
albums = self.bar.list_albums()
self.failUnlessEqual(set(albums), {s.album_key for s in SONGS})
self.bar.filter_albums([SONGS[0].album_key])
self._wait()
self.failUnlessEqual({s.album_key for s in self.songs},
{SONGS[0].album_key})
def test_active_filter(self):
with realized(self.bar):
self.bar.filter("artist", ["piman"])
self._wait()
self.failUnless(self.bar.active_filter(self.songs[0]))
for s in SONGS:
if s is not self.songs[0]:
self.failIf(self.bar.active_filter(s))
def test_default_display_pattern(self):
pattern_text = self.bar.display_pattern_text
self.failUnlessEqual(pattern_text, DEFAULT_PATTERN_TEXT)
self.failUnless("<album>" in pattern_text) |
299,792 | float par | """Internal module for passing/getting Python parameters to/from C/C++."""
# SyneRBI Synergistic Image Reconstruction Framework (SIRF)
# Copyright 2015 - 2021 Rutherford Appleton Laboratory STFC
# Copyright 2015 - 2022 University College London
# Copyright 2019 University of Hull
#
# This is software developed for the Collaborative Computational
# Project in Synergistic Reconstruction for Biomedical Imaging
# (formerly CCP PETMR)
# (http://www.ccpsynerbi.ac.uk/).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import sirf.select_module as select_module
import sirf.pyiutilities as pyiutil
from sirf.Utilities import check_status
#from sirf.pystir import setParameter, parameter
from sirf.pystir import cSTIR_setParameter as setParameter
from sirf.pystir import cSTIR_parameter as parameter
def set_parameter(hs, group, par, hv, stack = None):
if stack is None:
stack = inspect.stack()[1]
h = setParameter(hs, group, par, hv)
check_status(h, stack)
pyiutil.deleteDataHandle(h)
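# Every typed setter below follows the same handle lifecycle: wrap the Python
# value in a C data handle, forward it through setParameter, check the returned
# status handle, then free the handles. Hedged usage sketch (the group and
# parameter names are illustrative, not defined in this module):
#   set_float_par(prior.handle, 'GeneralisedPrior', 'penalisation_factor', 0.5)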
def set_char_par(handle, group, par, value):
h = pyiutil.charDataHandle(value)
set_parameter(handle, group, par, h, inspect.stack()[1])
pyiutil.deleteDataHandle(h)
def set_int_par(handle, group, par, value):
h = pyiutil.intDataHandle(int(value))
set_parameter(handle, group, par, h, inspect.stack()[1])
pyiutil.deleteDataHandle(h)
def set_bool_par(handle, group, par, value):
h = pyiutil.boolDataHandle(bool(value))
set_parameter(handle, group, par, h, inspect.stack()[1])
pyiutil.deleteDataHandle(h)
def set_float_par(handle, group, par, value):
h = pyiutil.floatDataHandle(float(value))
set_parameter(handle, group, par, h, inspect.stack()[1])
pyiutil.deleteDataHandle(h)
def bool_par(handle, group, par):
h = parameter(handle, group, par)
check_status(h, inspect.stack()[1])
value = pyiutil.boolDataFromHandle(h)
pyiutil.deleteDataHandle(h)
return value
def char_par(handle, group, par):
h = parameter(handle, group, par)
check_status(h)
value = pyiutil.charDataFromHandle(h)
pyiutil.deleteDataHandle(h)
return value
def int_par(handle, group, par):
h = parameter(handle, group, par)
check_status(h, inspect.stack()[1])
value = pyiutil.intDataFromHandle(h)
pyiutil.deleteDataHandle(h)
return value
def int_pars(handle, group, par, n):
h = parameter(handle, group, par)
check_status(h)
value = ()
for i in range(n):
value += (pyiutil.intDataItemFromHandle(h, i),)
pyiutil.deleteDataHandle(h)
return value
def uint16_pars(handle, group, par, n):
h = parameter(handle, group, par)
check_status(h)
value = ()
for i in range(n):
value += (pyiutil.uint16DataItemFromHandle(h, i),)
pyiutil.deleteDataHandle(h)
return value
def uint32_pars(handle, group, par, n):
h = parameter(handle, group, par)
check_status(h)
value = ()
for i in range(n):
value += (pyiutil.uint32DataItemFromHandle(h, i),)
pyiutil.deleteDataHandle(h)
return value
def uint64_pars(handle, group, par, n):
h = parameter(handle, group, par)
check_status(h)
value = ()
for i in range(n):
value += (pyiutil.uint64DataItemFromHandle(h, i),)
pyiutil.deleteDataHandle(h)
return value
def METHOD_NAME(handle, group, par):
h = parameter(handle, group, par)
check_status(h)
v = pyiutil.floatDataFromHandle(h)
pyiutil.deleteDataHandle(h)
return v
def float_pars(handle, group, par, n):
h = parameter(handle, group, par)
check_status(h)
value = ()
for i in range(n):
value += (pyiutil.floatDataItemFromHandle(h, i),)
pyiutil.deleteDataHandle(h)
return value
def parameter_handle(hs, group, par):
handle = parameter(hs, group, par)
check_status(handle, inspect.stack()[1])
return handle |
299,793 | tear down | #!/usr/bin/env python3
import math
import os
import random
import unittest
import torch
from torch import optim
import gpytorch
from gpytorch.distributions import MultitaskMultivariateNormal
from gpytorch.kernels import MultitaskKernel, RBFKernel
from gpytorch.likelihoods import MultitaskGaussianLikelihood
from gpytorch.means import ConstantMean, MultitaskMean
# Batch training test: Let's learn hyperparameters on a sine dataset, but test on a sine dataset and a cosine dataset
# in parallel.
train_x1 = torch.linspace(0, 1, 11).unsqueeze(-1)
train_y1 = torch.cat([torch.sin(train_x1 * (2 * math.pi)), torch.cos(train_x1 * (2 * math.pi))], 1)
test_x1 = torch.linspace(0, 1, 51).unsqueeze(-1)
test_y1 = torch.cat([torch.sin(test_x1 * (2 * math.pi)), torch.cos(test_x1 * (2 * math.pi))], 1)
train_x2 = torch.linspace(0, 1, 11).unsqueeze(-1)
train_y2 = torch.cat([torch.sin(train_x2 * (2 * math.pi)), torch.cos(train_x2 * (2 * math.pi))], 1)
test_x2 = torch.linspace(0, 1, 51).unsqueeze(-1)
test_y2 = torch.cat([torch.sin(test_x2 * (2 * math.pi)), torch.cos(test_x2 * (2 * math.pi))], 1)
# Combined sets of data
train_x12 = torch.cat((train_x1.unsqueeze(0), train_x2.unsqueeze(0)), dim=0).contiguous()
train_y12 = torch.cat((train_y1.unsqueeze(0), train_y2.unsqueeze(0)), dim=0).contiguous()
test_x12 = torch.cat((test_x1.unsqueeze(0), test_x2.unsqueeze(0)), dim=0).contiguous()
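# Shapes implied by the construction above (for reference):
#   train_x12: [2, 11, 1], train_y12: [2, 11, 2], test_x12: [2, 51, 1]
# i.e. a batch of two sine/cosine multitask problems evaluated in parallel.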
class ExactGPModel(gpytorch.models.ExactGP):
def __init__(self, train_inputs, train_targets, likelihood, batch_shape=torch.Size()):
super(ExactGPModel, self).__init__(train_inputs, train_targets, likelihood)
self.mean_module = MultitaskMean(
ConstantMean(batch_shape=batch_shape, constant_prior=gpytorch.priors.SmoothedBoxPrior(-1, 1)), num_tasks=2
)
self.covar_module = MultitaskKernel(
RBFKernel(
batch_shape=batch_shape,
lengthscale_prior=gpytorch.priors.NormalPrior(loc=torch.tensor(0.0), scale=torch.tensor(1.0)),
),
num_tasks=2,
rank=1,
)
def forward(self, x):
mean_x = self.mean_module(x)
covar_x = self.covar_module(x)
return MultitaskMultivariateNormal(mean_x, covar_x)
class TestBatchMultitaskGPRegression(unittest.TestCase):
def setUp(self):
if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false":
self.rng_state = torch.get_rng_state()
torch.manual_seed(0)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(0)
random.seed(0)
def METHOD_NAME(self):
if hasattr(self, "rng_state"):
torch.set_rng_state(self.rng_state)
def test_train_on_single_set_test_on_batch(self):
# We're manually going to set the hyperparameters to something they shouldn't be
likelihood = MultitaskGaussianLikelihood(
noise_prior=gpytorch.priors.NormalPrior(loc=torch.zeros(1), scale=torch.ones(1)), num_tasks=2
)
gp_model = ExactGPModel(train_x1, train_y1, likelihood)
mll = gpytorch.ExactMarginalLogLikelihood(likelihood, gp_model)
# Find optimal model hyperparameters
gp_model.train()
likelihood.train()
optimizer = optim.Adam(gp_model.parameters(), lr=0.1)
optimizer.n_iter = 0
gp_model.train()
likelihood.train()
optimizer = optim.Adam(gp_model.parameters(), lr=0.1)
for _ in range(50):
optimizer.zero_grad()
output = gp_model(train_x1)
loss = -mll(output, train_y1).sum()
loss.backward()
optimizer.step()
for param in gp_model.parameters():
self.assertTrue(param.grad is not None)
self.assertGreater(param.grad.norm().item(), 0)
# Test the model
gp_model.eval()
likelihood.eval()
# Make predictions for both sets of test points, and check MAEs.
batch_predictions = likelihood(gp_model(test_x12))
preds1 = batch_predictions.mean[0]
preds2 = batch_predictions.mean[1]
mean_abs_error1 = torch.mean(torch.abs(test_y1 - preds1))
mean_abs_error2 = torch.mean(torch.abs(test_y2 - preds2))
self.assertLess(mean_abs_error1.squeeze().item(), 0.05)
self.assertLess(mean_abs_error2.squeeze().item(), 0.05)
def test_train_on_batch_test_on_batch(self):
# We're manually going to set the hyperparameters to something they shouldn't be
likelihood = MultitaskGaussianLikelihood(
noise_prior=gpytorch.priors.NormalPrior(loc=torch.zeros(2), scale=torch.ones(2)),
batch_shape=torch.Size([2]),
num_tasks=2,
)
gp_model = ExactGPModel(train_x12, train_y12, likelihood, batch_shape=torch.Size([2]))
mll = gpytorch.ExactMarginalLogLikelihood(likelihood, gp_model)
# Find optimal model hyperparameters
gp_model.train()
likelihood.train()
optimizer = optim.Adam(gp_model.parameters(), lr=0.1)
for _ in range(50):
optimizer.zero_grad()
output = gp_model(train_x12)
loss = -mll(output, train_y12).sum()
loss.backward()
optimizer.step()
for param in gp_model.parameters():
self.assertTrue(param.grad is not None)
self.assertGreater(param.grad.norm().item(), 0)
# Test the model
gp_model.eval()
likelihood.eval()
# Make predictions for both sets of test points, and check MAEs.
batch_predictions = likelihood(gp_model(test_x12))
preds1 = batch_predictions.mean[0]
preds2 = batch_predictions.mean[1]
mean_abs_error1 = torch.mean(torch.abs(test_y1 - preds1))
mean_abs_error2 = torch.mean(torch.abs(test_y2 - preds2))
self.assertLess(mean_abs_error1.squeeze().item(), 0.05)
self.assertLess(mean_abs_error2.squeeze().item(), 0.05)
def test_train_on_batch_test_on_batch_shared_hypers_over_batch(self):
# We're manually going to set the hyperparameters to something they shouldn't be
likelihood = MultitaskGaussianLikelihood(
noise_prior=gpytorch.priors.NormalPrior(loc=torch.zeros(2), scale=torch.ones(2)),
batch_shape=torch.Size(),
num_tasks=2,
)
gp_model = ExactGPModel(train_x12, train_y12, likelihood, batch_shape=torch.Size())
mll = gpytorch.ExactMarginalLogLikelihood(likelihood, gp_model)
# Find optimal model hyperparameters
gp_model.train()
likelihood.train()
optimizer = optim.Adam(gp_model.parameters(), lr=0.1)
for _ in range(50):
optimizer.zero_grad()
output = gp_model(train_x12)
loss = -mll(output, train_y12).sum()
loss.backward()
optimizer.step()
for param in gp_model.parameters():
self.assertTrue(param.grad is not None)
self.assertGreater(param.grad.norm().item(), 0)
# Test the model
gp_model.eval()
likelihood.eval()
# Make predictions for both sets of test points, and check MAEs.
batch_predictions = likelihood(gp_model(test_x12))
preds1 = batch_predictions.mean[0]
preds2 = batch_predictions.mean[1]
mean_abs_error1 = torch.mean(torch.abs(test_y1 - preds1))
mean_abs_error2 = torch.mean(torch.abs(test_y2 - preds2))
self.assertLess(mean_abs_error1.squeeze().item(), 0.05)
self.assertLess(mean_abs_error2.squeeze().item(), 0.05)
if __name__ == "__main__":
unittest.main() |
299,794 | update | # -*- coding: utf-8 -*-
"""Loader for Static Mesh alembics."""
import os
from openpype.pipeline import (
get_representation_path,
AYON_CONTAINER_ID
)
from openpype.hosts.unreal.api import plugin
from openpype.hosts.unreal.api import pipeline as unreal_pipeline
import unreal # noqa
class StaticMeshAlembicLoader(plugin.Loader):
"""Load Unreal StaticMesh from Alembic"""
families = ["model", "staticMesh"]
label = "Import Alembic Static Mesh"
representations = ["abc"]
icon = "cube"
color = "orange"
@staticmethod
def get_task(filename, asset_dir, asset_name, replace, default_conversion):
task = unreal.AssetImportTask()
options = unreal.AbcImportSettings()
sm_settings = unreal.AbcStaticMeshSettings()
task.set_editor_property('filename', filename)
task.set_editor_property('destination_path', asset_dir)
task.set_editor_property('destination_name', asset_name)
task.set_editor_property('replace_existing', replace)
task.set_editor_property('automated', True)
task.set_editor_property('save', True)
# set import options here
# Unreal 4.24 ignores the settings. It works with Unreal 4.26
options.set_editor_property(
'import_type', unreal.AlembicImportType.STATIC_MESH)
sm_settings.set_editor_property('merge_meshes', True)
if not default_conversion:
conversion_settings = unreal.AbcConversionSettings(
preset=unreal.AbcConversionPreset.CUSTOM,
flip_u=False, flip_v=False,
rotation=[0.0, 0.0, 0.0],
scale=[1.0, 1.0, 1.0])
options.conversion_settings = conversion_settings
options.static_mesh_settings = sm_settings
task.options = options
return task
def load(self, context, name, namespace, options):
"""Load and containerise representation into Content Browser.
This is two step process. First, import FBX to temporary path and
then call `containerise()` on it - this moves all content to new
directory and then it will create AssetContainer there and imprint it
with metadata. This will mark this path as container.
Args:
context (dict): application context
name (str): subset name
namespace (str): in Unreal this is basically path to container.
This is not passed here, so namespace is set
by `containerise()` because only then we know
real path.
data (dict): Those would be data to be imprinted. This is not used
now, data are imprinted by `containerise()`.
Returns:
list(str): list of container content
"""
# Create directory for asset and Ayon container
root = "/Game/Ayon/Assets"
asset = context.get('asset').get('name')
suffix = "_CON"
asset_name = f"{asset}_{name}" if asset else f"{name}"
version = context.get('version')
# Check if version is hero version and use different name
if not version.get("name") and version.get('type') == "hero_version":
name_version = f"{name}_hero"
else:
name_version = f"{name}_v{version.get('name'):03d}"
default_conversion = False
if options.get("default_conversion"):
default_conversion = options.get("default_conversion")
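        # (Derived from the code here: the only option this loader consumes is
        # "default_conversion"; when truthy, get_task() keeps Unreal's default
        # Alembic conversion preset instead of the custom no-flip,
        # identity-rotation settings.)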
tools = unreal.AssetToolsHelpers().get_asset_tools()
asset_dir, container_name = tools.create_unique_asset_name(
f"{root}/{asset}/{name_version}", suffix="")
container_name += suffix
if not unreal.EditorAssetLibrary.does_directory_exist(asset_dir):
unreal.EditorAssetLibrary.make_directory(asset_dir)
path = self.filepath_from_context(context)
task = self.get_task(
path, asset_dir, asset_name, False, default_conversion)
unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task]) # noqa: E501
# Create Asset Container
unreal_pipeline.create_container(
container=container_name, path=asset_dir)
data = {
"schema": "ayon:container-2.0",
"id": AYON_CONTAINER_ID,
"asset": asset,
"namespace": asset_dir,
"container_name": container_name,
"asset_name": asset_name,
"loader": str(self.__class__.__name__),
"representation": context["representation"]["_id"],
"parent": context["representation"]["parent"],
"family": context["representation"]["context"]["family"]
}
unreal_pipeline.imprint(f"{asset_dir}/{container_name}", data)
asset_content = unreal.EditorAssetLibrary.list_assets(
asset_dir, recursive=True, include_folder=True
)
for a in asset_content:
unreal.EditorAssetLibrary.save_asset(a)
return asset_content
def METHOD_NAME(self, container, representation):
name = container["asset_name"]
source_path = get_representation_path(representation)
destination_path = container["namespace"]
task = self.get_task(source_path, destination_path, name, True, False)
        # re-import the Alembic file and replace the existing data
unreal.AssetToolsHelpers.get_asset_tools().import_asset_tasks([task])
container_path = "{}/{}".format(container["namespace"],
container["objectName"])
# update metadata
unreal_pipeline.imprint(
container_path,
{
"representation": str(representation["_id"]),
"parent": str(representation["parent"])
})
asset_content = unreal.EditorAssetLibrary.list_assets(
destination_path, recursive=True, include_folder=True
)
for a in asset_content:
unreal.EditorAssetLibrary.save_asset(a)
def remove(self, container):
path = container["namespace"]
parent_path = os.path.dirname(path)
unreal.EditorAssetLibrary.delete_directory(path)
asset_content = unreal.EditorAssetLibrary.list_assets(
parent_path, recursive=False
)
if len(asset_content) == 0:
unreal.EditorAssetLibrary.delete_directory(parent_path) |
299,795 | test get builds exclude list match | #!/usr/bin/env python3
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for make_db."""
import sys
import unittest
import make_db
import model
static_epoch = 1641585162
TEST_BUCKETS_DATA = {
'gs://kubernetes-jenkins/logs/': {'prefix': ''},
'gs://bucket1/': {'prefix': 'bucket1_prefix'},
'gs://bucket2/': {'prefix': 'bucket2_prefix'}
}
class MockedClient(make_db.GCSClient):
"""A GCSClient with stubs for external interactions."""
NOW = static_epoch
LOG_DIR = 'gs://kubernetes-jenkins/logs/'
JOB_DIR = LOG_DIR + 'fake/123/'
ART_DIR = JOB_DIR + 'artifacts/'
lists = {
LOG_DIR: [LOG_DIR + 'fake/'],
LOG_DIR + 'fake/': [JOB_DIR, LOG_DIR + 'fake/122/'],
LOG_DIR + 'bad-latest/': [LOG_DIR + 'bad-latest/6/'],
LOG_DIR + 'latest/': [LOG_DIR + 'latest/4/', LOG_DIR + 'latest/3/'],
'gs://kubernetes-jenkins/pr-logs/directory/': [],
ART_DIR: [ART_DIR + 'junit_01.xml'],
ART_DIR.replace('123', '122'): [],
}
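    # `lists` above stubs GCSClient.ls() (directory listings); `gets` below
    # stubs GCSClient.get() (object contents); both are keyed by GCS path.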
gets = {
JOB_DIR + 'finished.json': {'timestamp': NOW, 'result': 'SUCCESS'},
JOB_DIR + 'started.json': {'timestamp': NOW - 5},
LOG_DIR + 'latest/latest-build.txt': '4',
LOG_DIR + 'bad-latest/latest-build.txt': 'asdf',
LOG_DIR + 'fake/122/finished.json': {'timestamp': 123},
ART_DIR + 'junit_01.xml': '''
<testsuite>
<testcase name="Foo" time="3" />
<testcase name="Bad" time="4">
<failure>stacktrace</failure>
</testcase>
<testcase name="Lazy" time="0">
<skipped />
</testcase>
</testsuite>
'''}
def get(self, path, as_json=True):
return self.gets.get(path)
def ls(self, path, **_kwargs): # pylint: disable=arguments-differ
return self.lists[path]
class GCSClientTest(unittest.TestCase):
"""Unit tests for GCSClient"""
# pylint: disable=protected-access
JOBS_DIR = 'gs://kubernetes-jenkins/logs/'
def setUp(self):
self.client = MockedClient(self.JOBS_DIR)
def test_get_junits(self):
junits = self.client.get_junits_from_build(self.JOBS_DIR + 'fake/123')
self.assertEqual(
sorted(junits),
['gs://kubernetes-jenkins/logs/fake/123/artifacts/junit_01.xml'])
def test_get_builds_normal_list(self):
# normal case: lists a directory
self.assertEqual((True, ['123', '122']), self.client._get_builds('fake'))
def test_get_builds_latest(self):
# optimization: does a range based on build-latest.txt
precise, gen = self.client._get_builds('latest')
self.assertFalse(precise)
self.assertEqual(['4', '3', '2', '1'], list(gen))
def test_get_builds_limit(self):
# optimization: does a range based on build-latest.txt
precise, gen = self.client._get_builds('latest', build_limit=2)
self.assertFalse(precise)
self.assertEqual(['4', '3'], list(gen))
def test_get_builds_latest_fallback(self):
# fallback: still lists a directory when build-latest.txt isn't an int
self.assertEqual((True, ['6']), self.client._get_builds('bad-latest'))
def test_get_builds_non_sequential(self):
# fallback: setting sequential=false causes directory listing
self.client.metadata = {'sequential': False}
self.assertEqual((True, ['4', '3']),
self.client._get_builds('latest'))
def test_get_builds_exclude_list_no_match(self):
# special case: job is not in excluded list
self.client.metadata = {'exclude_jobs': ['notfake']}
self.assertEqual([('fake', '123'), ('fake', '122')], list(self.client.get_builds(set())))
def METHOD_NAME(self):
# special case: job is in excluded list
self.client.metadata = {'exclude_jobs': ['fake']}
self.assertEqual([], list(self.client.get_builds(set())))
def test_get_builds_exclude_list_match_using_regexp(self):
# special case: job is in excluded list
self.client.metadata = {'exclude_jobs': ['.*(flaky|flake|fake).*']}
self.assertEqual([], list(self.client.get_builds(set())))
# special case: job is in excluded list
self.client.metadata = {'exclude_jobs': ['.*(flaky|flake).*']}
self.assertEqual([('fake', '123'), ('fake', '122')], list(self.client.get_builds(set())))
class MainTest(unittest.TestCase):
"""End-to-end test of the main function's output."""
JOBS_DIR = GCSClientTest.JOBS_DIR
def test_remove_system_out(self):
self.assertEqual(make_db.remove_system_out('not<xml<lol'), 'not<xml<lol')
self.assertEqual(
make_db.remove_system_out('<a><b>c<system-out>bar</system-out></b></a>'),
'<a><b>c</b></a>')
@staticmethod
def get_expected_builds():
return {
MockedClient.JOB_DIR.replace('123', '122')[:-1]:
(None, {'timestamp': 123}, []),
MockedClient.JOB_DIR[:-1]:
({'timestamp': MockedClient.NOW - 5},
{'timestamp': MockedClient.NOW, 'result': 'SUCCESS'},
[MockedClient.gets[MockedClient.ART_DIR + 'junit_01.xml']])
}
def assert_main_output(self, threads, expected=None, db=None,
client=MockedClient):
if expected is None:
expected = self.get_expected_builds()
if db is None:
db = model.Database(':memory:')
make_db.main(db, {self.JOBS_DIR: {}}, threads, True, sys.maxsize, False, client)
result = {path: (started, finished, db.test_results_for_build(path))
for _rowid, path, started, finished in db.get_builds()}
self.assertEqual(result, expected)
return db
def test_clean(self):
self.maxDiff = None
for threads in [1, 32]:
self.assert_main_output(threads)
def test_incremental_new(self):
db = self.assert_main_output(1)
new_junit = '''
<testsuite>
<testcase name="New" time="8"/>
<testcase name="Foo" time="2.3"/>
</testsuite>
'''
class MockedClientNewer(MockedClient):
NOW = static_epoch
LOG_DIR = 'gs://kubernetes-jenkins/logs/'
JOB_DIR = LOG_DIR + 'fake/124/'
ART_DIR = JOB_DIR + 'artifacts/'
lists = {
LOG_DIR: [LOG_DIR + 'fake/'],
LOG_DIR + 'fake/': [JOB_DIR, LOG_DIR + 'fake/123/'],
ART_DIR: [ART_DIR + 'junit_01.xml'],
'gs://kubernetes-jenkins/pr-logs/directory/': [],
}
gets = {
JOB_DIR + 'finished.json': {'timestamp': NOW},
ART_DIR + 'junit_01.xml': new_junit,
}
expected = self.get_expected_builds()
expected[MockedClientNewer.JOB_DIR[:-1]] = (
None, {'timestamp': MockedClientNewer.NOW}, [new_junit])
self.assert_main_output(1, expected, db, MockedClientNewer)
if __name__ == '__main__':
unittest.main() |
299,796 | poly result dtype | """
Implementation of operations involving polynomials.
"""
import numpy as np
from numpy.polynomial import polynomial as poly
from numpy.polynomial import polyutils as pu
from numba import typeof
from numba.core import types, errors
from numba.core.extending import overload
from numba.np.numpy_support import type_can_asarray, as_dtype, from_dtype
@overload(np.roots)
def roots_impl(p):
# cast int vectors to float cf. numpy, this is a bit dicey as
# the roots could be complex which will fail anyway
ty = getattr(p, 'dtype', p)
if isinstance(ty, types.Integer):
cast_t = np.float64
else:
cast_t = as_dtype(ty)
def roots_impl(p):
# impl based on numpy:
# https://github.com/numpy/numpy/blob/master/numpy/lib/polynomial.py
if len(p.shape) != 1:
raise ValueError("Input must be a 1d array.")
non_zero = np.nonzero(p)[0]
if len(non_zero) == 0:
return np.zeros(0, dtype=cast_t)
tz = len(p) - non_zero[-1] - 1
# pull out the coeffs selecting between possible zero pads
p = p[int(non_zero[0]):int(non_zero[-1]) + 1]
n = len(p)
if n > 1:
# construct companion matrix, ensure fortran order
# to give to eigvals, write to upper diag and then
# transpose.
A = np.diag(np.ones((n - 2,), cast_t), 1).T
A[0, :] = -p[1:] / p[0] # normalize
roots = np.linalg.eigvals(A)
else:
roots = np.zeros(0, dtype=cast_t)
# add in additional zeros on the end if needed
if tz > 0:
return np.hstack((roots, np.zeros(tz, dtype=cast_t)))
else:
return roots
return roots_impl
@overload(pu.trimseq)
def polyutils_trimseq(seq):
if not type_can_asarray(seq):
msg = 'The argument "seq" must be array-like'
raise errors.TypingError(msg)
if isinstance(seq, types.BaseTuple):
msg = 'Unsupported type %r for argument "seq"'
raise errors.TypingError(msg % (seq))
if np.ndim(seq) > 1:
msg = 'Coefficient array is not 1-d'
raise errors.NumbaValueError(msg)
def impl(seq):
if len(seq) == 0:
return seq
else:
for i in range(len(seq) - 1, -1, -1):
if seq[i] != 0:
break
return seq[:i+1]
return impl
def METHOD_NAME(tup):
# A helper function that takes a tuple of inputs and returns their result
# dtype. Used for poly functions.
res_dtype = np.float64
for item in tup:
if isinstance(item, types.Number):
s1 = str(as_dtype(item))
elif isinstance(item, types.Tuple):
t = [as_dtype(ty) for ty in item.types]
s1 = str(np.result_type(*t))
else:
s1 = str(item.dtype)
res_dtype = (np.result_type(res_dtype, s1))
return from_dtype(res_dtype)
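# Worked example for the helper above (illustrative): a (float32 array, int64
# scalar) input pair promotes through the float64 floor to float64, while
# mixing in a complex64 array would promote the result to complex128.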
@overload(poly.polyadd)
def numpy_polyadd(c1, c2):
if not type_can_asarray(c1):
msg = 'The argument "c1" must be array-like'
raise errors.TypingError(msg)
if not type_can_asarray(c2):
msg = 'The argument "c2" must be array-like'
raise errors.TypingError(msg)
if np.ndim(c1) > 1 or np.ndim(c2) > 1:
msg = 'Coefficient array is not 1-d'
raise errors.NumbaValueError(msg)
result_dtype = METHOD_NAME((c1, c2))
def impl(c1, c2):
c1 = np.asarray(c1)
c2 = np.asarray(c2)
arr1 = np.atleast_1d(c1).astype(result_dtype)
arr2 = np.atleast_1d(c2).astype(result_dtype)
diff = len(arr2) - len(arr1)
if diff > 0:
zr = np.zeros(diff)
arr1 = np.concatenate((arr1, zr))
if diff < 0:
zr = np.zeros(-diff)
arr2 = np.concatenate((arr2, zr))
val = arr1 + arr2
return pu.trimseq(val)
return impl
@overload(poly.polysub)
def numpy_polysub(c1, c2):
if not type_can_asarray(c1):
msg = 'The argument "c1" must be array-like'
raise errors.TypingError(msg)
if not type_can_asarray(c2):
msg = 'The argument "c2" must be array-like'
raise errors.TypingError(msg)
if np.ndim(c1) > 1 or np.ndim(c2) > 1:
msg = 'Coefficient array is not 1-d'
raise errors.NumbaValueError(msg)
result_dtype = METHOD_NAME((c1, c2))
def impl(c1, c2):
c1 = np.asarray(c1)
c2 = np.asarray(c2)
arr1 = np.atleast_1d(c1).astype(result_dtype)
arr2 = np.atleast_1d(c2).astype(result_dtype)
diff = len(arr2) - len(arr1)
if diff > 0:
zr = np.zeros(diff)
arr1 = np.concatenate((arr1, zr))
if diff < 0:
zr = np.zeros(-diff)
arr2 = np.concatenate((arr2, zr))
val = arr1 - arr2
return pu.trimseq(val)
return impl
@overload(poly.polymul)
def numpy_polymul(c1, c2):
if not type_can_asarray(c1):
msg = 'The argument "c1" must be array-like'
raise errors.TypingError(msg)
if not type_can_asarray(c2):
msg = 'The argument "c2" must be array-like'
raise errors.TypingError(msg)
if np.ndim(c1) > 1 or np.ndim(c2) > 1:
msg = 'Coefficient array is not 1-d'
raise errors.NumbaValueError(msg)
result_dtype = METHOD_NAME((c1, c2))
def impl(c1, c2):
c1 = np.asarray(c1)
c2 = np.asarray(c2)
arr1 = np.atleast_1d(c1)
arr2 = np.atleast_1d(c2)
val = np.convolve(arr1, arr2).astype(result_dtype)
return pu.trimseq(val)
    return impl |
299,797 | create widgets | "Dialog to specify or edit the parameters for a user configured help source."
import os
import sys
from Tkinter import *
import tkMessageBox
import tkFileDialog
class GetHelpSourceDialog(Toplevel):
def __init__(self, parent, title, menuItem='', filePath='', _htest=False):
"""Get menu entry and url/ local file location for Additional Help
User selects a name for the Help resource and provides a web url
or a local file as its source. The user can enter a url or browse
for the file.
_htest - bool, change box location when running htest
"""
Toplevel.__init__(self, parent)
self.configure(borderwidth=5)
self.resizable(height=FALSE, width=FALSE)
self.title(title)
self.transient(parent)
self.grab_set()
self.protocol("WM_DELETE_WINDOW", self.Cancel)
self.parent = parent
self.result = None
self.METHOD_NAME()
self.menu.set(menuItem)
self.path.set(filePath)
self.withdraw() #hide while setting geometry
#needs to be done here so that the winfo_reqwidth is valid
self.update_idletasks()
#centre dialog over parent. below parent if running htest.
self.geometry(
"+%d+%d" % (
parent.winfo_rootx() +
(parent.winfo_width()/2 - self.winfo_reqwidth()/2),
parent.winfo_rooty() +
((parent.winfo_height()/2 - self.winfo_reqheight()/2)
if not _htest else 150)))
self.deiconify() #geometry set, unhide
self.bind('<Return>', self.Ok)
self.wait_window()
def METHOD_NAME(self):
self.menu = StringVar(self)
self.path = StringVar(self)
self.fontSize = StringVar(self)
self.frameMain = Frame(self, borderwidth=2, relief=GROOVE)
self.frameMain.pack(side=TOP, expand=TRUE, fill=BOTH)
labelMenu = Label(self.frameMain, anchor=W, justify=LEFT,
text='Menu Item:')
self.entryMenu = Entry(self.frameMain, textvariable=self.menu,
width=30)
self.entryMenu.focus_set()
labelPath = Label(self.frameMain, anchor=W, justify=LEFT,
text='Help File Path: Enter URL or browse for file')
self.entryPath = Entry(self.frameMain, textvariable=self.path,
width=40)
self.entryMenu.focus_set()
labelMenu.pack(anchor=W, padx=5, pady=3)
self.entryMenu.pack(anchor=W, padx=5, pady=3)
labelPath.pack(anchor=W, padx=5, pady=3)
self.entryPath.pack(anchor=W, padx=5, pady=3)
browseButton = Button(self.frameMain, text='Browse', width=8,
command=self.browseFile)
browseButton.pack(pady=3)
frameButtons = Frame(self)
frameButtons.pack(side=BOTTOM, fill=X)
self.buttonOk = Button(frameButtons, text='OK',
width=8, default=ACTIVE, command=self.Ok)
self.buttonOk.grid(row=0, column=0, padx=5,pady=5)
self.buttonCancel = Button(frameButtons, text='Cancel',
width=8, command=self.Cancel)
self.buttonCancel.grid(row=0, column=1, padx=5, pady=5)
def browseFile(self):
filetypes = [
("HTML Files", "*.htm *.html", "TEXT"),
("PDF Files", "*.pdf", "TEXT"),
("Windows Help Files", "*.chm"),
("Text Files", "*.txt", "TEXT"),
("All Files", "*")]
path = self.path.get()
if path:
dir, base = os.path.split(path)
else:
base = None
if sys.platform[:3] == 'win':
dir = os.path.join(os.path.dirname(sys.executable), 'Doc')
if not os.path.isdir(dir):
dir = os.getcwd()
else:
dir = os.getcwd()
opendialog = tkFileDialog.Open(parent=self, filetypes=filetypes)
file = opendialog.show(initialdir=dir, initialfile=base)
if file:
self.path.set(file)
def MenuOk(self):
"Simple validity check for a sensible menu item name"
menuOk = True
menu = self.menu.get()
menu.strip()
if not menu:
tkMessageBox.showerror(title='Menu Item Error',
message='No menu item specified',
parent=self)
self.entryMenu.focus_set()
menuOk = False
elif len(menu) > 30:
tkMessageBox.showerror(title='Menu Item Error',
message='Menu item too long:'
'\nLimit 30 characters.',
parent=self)
self.entryMenu.focus_set()
menuOk = False
return menuOk
def PathOk(self):
"Simple validity check for menu file path"
pathOk = True
path = self.path.get()
path.strip()
if not path: #no path specified
tkMessageBox.showerror(title='File Path Error',
message='No help file path specified.',
parent=self)
self.entryPath.focus_set()
pathOk = False
elif path.startswith(('www.', 'http')):
pass
else:
if path[:5] == 'file:':
path = path[5:]
if not os.path.exists(path):
tkMessageBox.showerror(title='File Path Error',
message='Help file path does not exist.',
parent=self)
self.entryPath.focus_set()
pathOk = False
return pathOk
def Ok(self, event=None):
if self.MenuOk() and self.PathOk():
self.result = (self.menu.get().strip(),
self.path.get().strip())
if sys.platform == 'darwin':
path = self.result[1]
if path.startswith(('www', 'file:', 'http:')):
pass
else:
# Mac Safari insists on using the URI form for local files
self.result = list(self.result)
self.result[1] = "file://" + path
self.destroy()
def Cancel(self, event=None):
self.result = None
self.destroy()
if __name__ == '__main__':
from idlelib.idle_test.htest import run
run(GetHelpSourceDialog) |
299,798 | test write header | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the JSON output module."""
import json
import io
import os
import sys
import unittest
from dfvfs.lib import definitions as dfvfs_definitions
from dfvfs.path import factory as path_spec_factory
from plaso.lib import definitions
from plaso.output import json_out
from tests import test_lib as shared_test_lib
from tests.containers import test_lib as containers_test_lib
from tests.output import test_lib
class JSONOutputTest(test_lib.OutputModuleTestCase):
"""Tests for the JSON output module."""
# pylint: disable=protected-access
_OS_PATH_SPEC = path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_OS, location='{0:s}{1:s}'.format(
os.path.sep, os.path.join('cases', 'image.dd')))
_TEST_EVENTS = [
{'_parser_chain': 'test',
'data_type': 'test:event',
'hostname': 'ubuntu',
'path_spec': path_spec_factory.Factory.NewPathSpec(
dfvfs_definitions.TYPE_INDICATOR_TSK, inode=15,
location='/var/log/syslog.1', parent=_OS_PATH_SPEC),
'text': (
'Reporter <CRON> PID: |8442| (pam_unix(cron:session): session\n '
'closed for user root)'),
'timestamp': '2012-06-27 18:17:01',
'timestamp_desc': definitions.TIME_DESCRIPTION_UNKNOWN,
'username': 'root'}]
def testWriteFieldValues(self):
"""Tests the _WriteFieldValues function."""
test_file_object = io.StringIO()
output_mediator = self._CreateOutputMediator()
formatters_directory_path = self._GetTestFilePath(['formatters'])
output_mediator.ReadMessageFormattersFromDirectory(
formatters_directory_path)
output_module = json_out.JSONOutputModule()
output_module._file_object = test_file_object
event, event_data, event_data_stream = (
containers_test_lib.CreateEventFromValues(self._TEST_EVENTS[0]))
# TODO: add test for event_tag.
field_values = output_module._GetFieldValues(
output_mediator, event, event_data, event_data_stream, None)
output_module._WriteFieldValues(output_mediator, field_values)
expected_timestamp = shared_test_lib.CopyTimestampFromString(
'2012-06-27 18:17:01')
if sys.platform.startswith('win'):
# The dict comparison is very picky on Windows hence we
# have to make sure the drive letter is in the same case.
expected_os_location = os.path.abspath('\\{0:s}'.format(
os.path.join('cases', 'image.dd')))
else:
expected_os_location = '{0:s}{1:s}'.format(
os.path.sep, os.path.join('cases', 'image.dd'))
expected_json_dict = {
'event_0': {
'__container_type__': 'event',
'__type__': 'AttributeContainer',
'date_time': {
'__class_name__': 'PosixTimeInMicroseconds',
'__type__': 'DateTimeValues',
'timestamp': 1340821021000000,
},
'data_type': 'test:event',
'display_name': 'TSK:/var/log/syslog.1',
'filename': '/var/log/syslog.1',
'hostname': 'ubuntu',
'inode': '15',
'message': (
'Reporter <CRON> PID: |8442| (pam_unix(cron:session): '
'session closed for user root)'),
'parser': 'test',
'pathspec': {
'__type__': 'PathSpec',
'type_indicator': 'TSK',
'location': '/var/log/syslog.1',
'inode': 15,
'parent': {
'__type__': 'PathSpec',
'type_indicator': 'OS',
'location': expected_os_location,
}
},
'text': (
'Reporter <CRON> PID: |8442| (pam_unix(cron:session): '
'session\n closed for user root)'),
'timestamp': expected_timestamp,
'timestamp_desc': definitions.TIME_DESCRIPTION_UNKNOWN,
'username': 'root',
}
}
event_body = test_file_object.getvalue()
# We need to compare dicts since we cannot determine the order
# of values in the string.
json_string = '{{ {0:s} }}'.format(event_body)
json_dict = json.loads(json_string)
self.assertEqual(json_dict, expected_json_dict)
def testWriteFooter(self):
"""Tests the WriteFooter function."""
test_file_object = io.StringIO()
output_module = json_out.JSONOutputModule()
output_module._file_object = test_file_object
output_module.WriteFooter()
footer = test_file_object.getvalue()
self.assertEqual(footer, '}')
def METHOD_NAME(self):
"""Tests the WriteHeader function."""
test_file_object = io.StringIO()
output_mediator = self._CreateOutputMediator()
output_module = json_out.JSONOutputModule()
output_module._file_object = test_file_object
output_module.WriteHeader(output_mediator)
header = test_file_object.getvalue()
self.assertEqual(header, '{')
if __name__ == '__main__':
unittest.main() |
299,799 | list profiles | import asyncio, discord, time
from operator import itemgetter
from discord.ext import commands
from Cogs import Utils, Settings, ReadableTime, DisplayName, Message, Nullify
def setup(bot):
# Add the bot and deps
settings = bot.get_cog("Settings")
bot.add_cog(Profile(bot, settings))
# This is the profiles module.
class Profile(commands.Cog):
# Init with the bot reference, and a reference to the settings var
def __init__(self, bot, settings):
self.bot = bot
self.settings = settings
global Utils, DisplayName
Utils = self.bot.get_cog("Utils")
DisplayName = self.bot.get_cog("DisplayName")
@commands.command(pass_context=True)
async def addprofile(self, ctx, name = None, *, link = None):
"""Add a profile to your profile list."""
# Remove tabs, newlines, and carriage returns and strip leading/trailing spaces from the name
name = None if name == None else name.replace("\n"," ").replace("\r","").replace("\t"," ").strip()
if name == None or link == None:
msg = 'Usage: `{}addprofile "[profile name]" [link]`'.format(ctx.prefix)
return await ctx.send(msg)
itemList = self.settings.getUserStat(ctx.author, ctx.guild, "Profiles")
if not itemList:
itemList = []
currentTime = int(time.time())
item = next((x for x in itemList if x["Name"].lower() == name.lower()),None)
if item:
msg = Utils.suppressed(ctx,"{}'s {} profile was updated!".format(DisplayName.name(ctx.author),Nullify.escape_all(item["Name"])))
item["URL"] = link
item["Updated"] = currentTime
else:
itemList.append({"Name":name,"URL":link,"Created":currentTime})
msg = Utils.suppressed(ctx,"{} added to {}'s profile list!".format(Nullify.escape_all(name),DisplayName.name(ctx.author)))
self.settings.setUserStat(ctx.author, ctx.guild, "Profiles", itemList)
await ctx.send(msg)
@commands.command(pass_context=True)
async def removeprofile(self, ctx, *, name = None):
"""Remove a profile from your profile list."""
name = None if name == None else name.replace("\n"," ").replace("\r","").replace("\t"," ").strip()
if name == None:
msg = 'Usage: `{}removeprofile [profile name]`'.format(ctx.prefix)
return await ctx.send(msg)
itemList = self.settings.getUserStat(ctx.author, ctx.guild, "Profiles")
if not itemList or itemList == []:
msg = '*{}* has no profiles set! They can add some with the `{}addprofile "[profile name]" [link]` command!'.format(DisplayName.name(ctx.author), ctx.prefix)
return await ctx.send(msg)
item = next((x for x in itemList if x["Name"].lower() == name.lower()),None)
if not item:
return await ctx.send(Utils.suppressed(ctx,"{} not found in {}'s profile list!".format(Nullify.escape_all(name),DisplayName.name(ctx.author))))
itemList.remove(item)
self.settings.setUserStat(ctx.author, ctx.guild, "Profiles", itemList)
await ctx.send(Utils.suppressed(ctx,"{} removed from {}'s profile list!".format(Nullify.escape_all(item["Name"]),DisplayName.name(ctx.author))))
def _get_profile(self,ctx,name=None):
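        # Parse "<member words> <profile words>" by trying the longest possible
        # member-name prefix first and shrinking it one word at a time; if no
        # member/profile pair matches, fall back to treating the whole string
        # as one of the invoking author's own profile names.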
parts = name.split()
for j in range(len(parts)):
# Reverse search direction
i = len(parts)-1-j
# Name = 0 up to i joined by space
name_str = ' '.join(parts[0:i+1])
# Profile = end of name -> end of parts joined by space
profile_str = ' '.join(parts[i+1:])
mem_from_name = DisplayName.memberForName(name_str, ctx.guild)
if mem_from_name:
# We got a member - let's check for a profile
itemList = self.settings.getUserStat(mem_from_name, ctx.guild, "Profiles", [])
item = next((x for x in itemList if x["Name"].lower() == profile_str.lower()),None)
if item: return (mem_from_name,item)
# Check if there is no member specified
itemList = self.settings.getUserStat(ctx.author, ctx.guild, "Profiles", [])
item = next((x for x in itemList if x["Name"].lower() == name.lower()),None)
if item: return (ctx.author,item)
return None
async def _get_profile_reply(self,ctx,name=None,raw=False):
if not name:
msg = "Usage: `{}{}profile [member] [profile name]`".format(ctx.prefix, "raw" if raw else "")
return await ctx.send(msg)
item = self._get_profile(ctx,name)
if item == None:
return await ctx.send("Sorry, I couldn't find that user/profile.")
member,item = item
msg = '*{}\'s {} Profile:*\n\n{}'.format(DisplayName.name(member), item['Name'], discord.utils.escape_markdown(item['URL']) if raw else item['URL'])
return await ctx.send(Utils.suppressed(ctx,msg))
async def METHOD_NAME(self,ctx,member=None,raw=None):
if not member:
member = ctx.author
else:
newMember = DisplayName.memberForName(member, ctx.guild)
if not newMember:
# no member found by that name
msg = 'I couldn\'t find *{}* on this server.'.format(member)
return await ctx.send(Utils.suppressed(ctx,msg))
member = newMember
# We have a member here
itemList = self.settings.getUserStat(member, ctx.guild, "Profiles")
if not itemList or itemList == []:
msg = '*{}* has no profiles set! They can add some with the `{}addprofile "[profile name]" [link]` command!'.format(DisplayName.name(ctx.author), ctx.prefix)
return await ctx.send(msg)
itemList = sorted(itemList, key=itemgetter('Name'))
itemText = "*{}'s* Profiles:\n\n".format(DisplayName.name(member))
itemText += discord.utils.escape_markdown("\n".join([x["Name"] for x in itemList])) if raw else "\n".join([x["Name"] for x in itemList])
return await Message.Message(message=Utils.suppressed(ctx,itemText)).send(ctx)
@commands.command(pass_context=True)
async def profile(self, ctx, *, member = None, name = None):
"""Retrieve a profile from the passed user's profile list."""
await self._get_profile_reply(ctx,member)
@commands.command(pass_context=True)
async def rawprofile(self, ctx, *, member = None, name = None):
"""Retrieve a profile's raw markdown from the passed user's profile list."""
await self._get_profile_reply(ctx,member,raw=True)
@commands.command(pass_context=True)
async def profileinfo(self, ctx, *, member = None, name = None):
"""Displays info about a profile from the passed user's profile list."""
if not member:
msg = 'Usage: `{}profileinfo [member] [profile name]`'.format(ctx.prefix)
return await ctx.send(msg)
item = self._get_profile(ctx,member)
if item == None:
return await ctx.send("Sorry, I couldn't find that user/profile.")
member,item = item
# We have a profile
current_time = int(time.time())
msg = '**{}:**\n'.format(item['Name'])
msg += "Created: {} ago\n".format(ReadableTime.getReadableTimeBetween(item.get("Created",None), current_time, True)) if item.get("Created",None) else "Created: `UNKNOWN`\n"
if item.get("Updated",None):
msg += "Updated: {} ago\n".format(ReadableTime.getReadableTimeBetween(item["Updated"], current_time, True))
return await ctx.send(Utils.suppressed(ctx,msg))
@commands.command(pass_context=True)
async def profiles(self, ctx, *, member = None):
"""List all profiles in the passed user's profile list."""
await self.METHOD_NAME(ctx,member)
@commands.command(pass_context=True)
async def rawprofiles(self, ctx, *, member = None):
"""List all profiles' raw markdown in the passed user's profile list."""
await self.METHOD_NAME(ctx,member,raw=True) |