id (int64, 0–300k) | label (string, 1–74 chars, ⌀) | text (string, 4k–8k chars) |
---|---|---|
0 | set up class | # python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <dr.prodigy.github@gmail.com> (c) 2017-2023
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
from holidays.countries.dominican_republic import DominicanRepublic, DO, DOM
from tests.common import TestCase
class TestDominicanRepublic(TestCase):
@classmethod
def METHOD_NAME(cls):
super().METHOD_NAME(DominicanRepublic)
def test_country_aliases(self):
self.assertCountryAliases(DominicanRepublic, DO, DOM)
def test_2020(self):
self.assertHolidays(
("2020-01-01", "Año Nuevo"),
("2020-01-06", "Día de los Santos Reyes"),
("2020-01-21", "Día de la Altagracia"),
("2020-01-26", "Día de Duarte"),
("2020-02-27", "Día de Independencia"),
("2020-04-10", "Viernes Santo"),
("2020-05-04", "Día del Trabajo"),
("2020-06-11", "Corpus Christi"),
("2020-08-16", "Día de la Restauración"),
("2020-09-24", "Día de las Mercedes"),
("2020-11-09", "Día de la Constitución"),
("2020-12-25", "Día de Navidad"),
)
def test_2021(self):
self.assertHolidays(
("2021-01-01", "Año Nuevo"),
("2021-01-04", "Día de los Santos Reyes"),
("2021-01-21", "Día de la Altagracia"),
("2021-01-25", "Día de Duarte"),
("2021-02-27", "Día de Independencia"),
("2021-04-02", "Viernes Santo"),
("2021-05-01", "Día del Trabajo"),
("2021-06-03", "Corpus Christi"),
("2021-08-16", "Día de la Restauración"),
("2021-09-24", "Día de las Mercedes"),
("2021-11-06", "Día de la Constitución"),
("2021-12-25", "Día de Navidad"),
)
def test_2022(self):
self.assertHolidays(
("2022-01-01", "Año Nuevo"),
("2022-01-10", "Día de los Santos Reyes"),
("2022-01-21", "Día de la Altagracia"),
("2022-01-24", "Día de Duarte"),
("2022-02-27", "Día de Independencia"),
("2022-04-15", "Viernes Santo"),
("2022-05-02", "Día del Trabajo"),
("2022-06-16", "Corpus Christi"),
("2022-08-15", "Día de la Restauración"),
("2022-09-24", "Día de las Mercedes"),
("2022-11-06", "Día de la Constitución"),
("2022-12-25", "Día de Navidad"),
)
def test_movable(self):
self.assertHoliday(
"1996-01-06",
"1997-01-06",
"1998-01-05",
"1998-01-26",
"1999-01-25",
"1996-05-01",
"1998-05-04",
"1996-11-06",
"1997-11-10",
"2000-08-16",
"2001-08-20",
)
self.assertNoHoliday(
"1998-01-06",
"1999-01-26",
"1998-05-01",
"1997-11-06",
"2001-08-16",
)
def test_l10n_default(self):
self.assertLocalizedHolidays(
("2022-01-01", "Año Nuevo"),
("2022-01-10", "Día de los Santos Reyes"),
("2022-01-21", "Día de la Altagracia"),
("2022-01-24", "Día de Duarte"),
("2022-02-27", "Día de Independencia"),
("2022-04-15", "Viernes Santo"),
("2022-05-02", "Día del Trabajo"),
("2022-06-16", "Corpus Christi"),
("2022-08-15", "Día de la Restauración"),
("2022-09-24", "Día de las Mercedes"),
("2022-11-06", "Día de la Constitución"),
("2022-12-25", "Día de Navidad"),
)
def test_l10n_en_us(self):
self.assertLocalizedHolidays(
"en_US",
("2022-01-01", "New Year's Day"),
("2022-01-10", "Epiphany"),
("2022-01-21", "Lady of Altagracia"),
("2022-01-24", "Juan Pablo Duarte Day"),
("2022-02-27", "Independence Day"),
("2022-04-15", "Good Friday"),
("2022-05-02", "Labor Day"),
("2022-06-16", "Feast of Corpus Christi"),
("2022-08-15", "Restoration Day"),
("2022-09-24", "Our Lady of Mercedes Day"),
("2022-11-06", "Constitution Day"),
("2022-12-25", "Christmas Day"),
)
def test_l10n_uk(self):
self.assertLocalizedHolidays(
"uk",
("2022-01-01", "Новий рік"),
("2022-01-10", "Богоявлення"),
("2022-01-21", "День Богоматері Альтаграсія"),
("2022-01-24", "День Дуарте"),
("2022-02-27", "День незалежності"),
("2022-04-15", "Страсна пʼятниця"),
("2022-05-02", "День праці"),
("2022-06-16", "Свято Тіла і Крові Христових"),
("2022-08-15", "День реставрації"),
("2022-09-24", "День Богоматері Милосердя"),
("2022-11-06", "День Конституції"),
("2022-12-25", "Різдво Христове"),
) |
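A minimal usage sketch for the library exercised by the tests above, assuming the public `holidays` package API (country classes behave like dicts keyed by date); the dates and names mirror the 2022 test data:
import datetime
import holidays  # assumed to be the installed python-holidays package

do_holidays = holidays.DominicanRepublic(years=2022)
print(datetime.date(2022, 2, 27) in do_holidays)  # True  (Día de Independencia)
print(do_holidays.get("2022-12-25"))              # "Día de Navidad"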
1 | str indent | import re, inspect, textwrap, pydoc
from docscrape import NumpyDocString, FunctionDoc, ClassDoc
from six import iteritems
class SphinxDocString(NumpyDocString):
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def METHOD_NAME(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param,param_type,desc in self[name]:
out += self.METHOD_NAME(['**%s** : %s' % (param.strip(),
param_type)])
out += ['']
out += self.METHOD_NAME(desc,8)
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self.METHOD_NAME(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self.METHOD_NAME(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default','')]
for section, references in iteritems(idx):
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
return out
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
for param_list in ('Parameters', 'Attributes', 'Methods',
'Returns','Raises'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_section('Examples')
out = self.METHOD_NAME(out,indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
pass
class SphinxClassDoc(SphinxDocString, ClassDoc):
pass
def get_doc_object(obj, what=None, doc=None):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif callable(obj):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, '', func_doc=SphinxFunctionDoc, doc=doc)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, '', doc=doc)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxDocString(doc)
|
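A hypothetical rendering sketch for the converter above, assuming the companion docscrape module is importable; it feeds a NumPy-style docstring through get_doc_object() and prints the generated reST:
def add(a, b):
    """Add two numbers.

    Parameters
    ----------
    a : int
        First operand.
    b : int
        Second operand.

    Returns
    -------
    int
        The sum of the operands.
    """
    return a + b

print(get_doc_object(add))  # emits the reST/Sphinx version of the docstring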
2 | set mkl envs | #!/usr/bin/python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import sys
import subprocess as sp
DEFAULT_SEASTAR_PORT="3333"
JEMALLOC_244 = "libjemalloc.so.2.4.4"
JEMALLOC_251 = "libjemalloc.so.2.5.1"
def gen_cluster_info(workspace):
tf_config_json = os.environ.get("TF_CONFIG", "{}")
print("TF_CONFIG=", tf_config_json)
tf_config = json.loads(tf_config_json)
cluster = tf_config.get("cluster", {})
if cluster is None:
print("TF_CONFIG cluster is empty")
return
ps_hosts = []
worker_hosts = []
chief_hosts = []
node_list = []
for key, value in cluster.items():
if "ps" == key:
ps_hosts = value
elif "worker" == key:
worker_hosts = value
elif "chief" == key:
chief_hosts = value
node_list.extend(value)
os.environ['TF_SEASTAR_ENDPOINT_MAP_PATH'] = '/tmp/'
print("Start to gen endpoint_map file.")
#endpoint_map_path = os.path.join(workspace, ".endpoint_map")
endpoint_map_path = "/tmp/.endpoint_map"
with open(endpoint_map_path, 'w') as fout:
for node in node_list:
host = node[0:node.index(':')]
fout.write(node + "=" + host + ":" + DEFAULT_SEASTAR_PORT + "\n")
os.system("ls -ltr /tmp/.endpoint_map")
task = tf_config.get("task", {})
if task is None:
print("TF_CONFIG task is empty")
return
task_index = task['index']
job_name = task['type']
return ps_hosts, worker_hosts, chief_hosts, job_name, task_index
def copy_python_binary(local_dir):
cmd_str = "cp /usr/bin/python " + os.path.join(local_dir, "python_bin")
return sp.call(cmd_str, shell=True)
def set_jemalloc_version(workspace):
strategy = os.environ.get("MEM_USAGE_STRATEGY", "")
cmd_str = ""
if "xmin" == strategy:
cmd_str = "export JEMALLOC_VERSION=" + os.path.join(workspace, JEMALLOC_244) + ";"
cmd_str += "export MALLOC_CONF=decay_time:0;"
elif "xmid" == strategy:
cmd_str = "export JEMALLOC_VERSION=" + os.path.join(workspace, JEMALLOC_244) + ";"
elif "min" == strategy:
cmd_str = "export JEMALLOC_VERSION=" + os.path.join(workspace, JEMALLOC_251) + ";"
cmd_str += "export MALLOC_CONF=dirty_decay_ms:0,muzzy_decay_ms:0;"
elif "mid" == strategy:
cmd_str = "export JEMALLOC_VERSION=" + os.path.join(workspace, JEMALLOC_251) + ";"
cmd_str += "export MALLOC_CONF=background_thread:true,dirty_decay_ms:10000,muzzy_decay_ms:10000;"
elif "max" == strategy:
cmd_str = "export JEMALLOC_VERSION=" + os.path.join(workspace, JEMALLOC_251) + ";"
cmd_str += "export MALLOC_CONF=background_thread:true,metadata_thp:auto,dirty_decay_ms:240000,muzzy_decay_ms:240000;"
elif "244" == strategy:
cmd_str = "export JEMALLOC_VERSION=" + os.path.join(workspace, JEMALLOC_244) + ";"
elif "251" == strategy:
cmd_str = "export JEMALLOC_VERSION=" + os.path.join(workspace, JEMALLOC_251) + ";"
cmd_str += "export MALLOC_CONF=background_thread:true,metadata_thp:auto,dirty_decay_ms:60000,muzzy_decay_ms:60000;"
elif "close" == strategy:
pass
else:
cmd_str = "export JEMALLOC_VERSION=" + os.path.join(workspace, JEMALLOC_251) + ";"
cmd_str += "export MALLOC_CONF=background_thread:true,metadata_thp:auto,dirty_decay_ms:240000,muzzy_decay_ms:240000;"
return cmd_str
def pip_install_requirements(workspace):
requirements_path = os.path.join(workspace, "requirements.txt")
if not os.path.exists(requirements_path):
return 0
cmd_str = "$(which pip) install -r " + requirements_path
print("try to install requirements.txt from " + requirements_path)
return sp.call(cmd_str, shell=True)
def run_tensorflow_job(workspace, tf_script, tf_args, tf_envs, set_jemalloc_version_cmd):
cmd_str = "cd " + workspace + ";"
if set_jemalloc_version_cmd:
cmd_str += set_jemalloc_version_cmd
cmd_str += "LD_PRELOAD=${JEMALLOC_VERSION} "
cmd_str += " ".join(tf_envs) + " $(which python) -u "
cmd_str += tf_script + " " + " ".join(tf_args)
print("run tensorflow command:", cmd_str)
return sp.call(cmd_str, shell=True)
def METHOD_NAME(job_name):
envs = []
if "ps" == job_name:
envs.append("OMP_NUM_THREADS=1")
envs.append("KMP_BLOCKTIME=0")
envs.append("MKL_ENABLE_INSTRUCTIONS=AVX2")
elif "worker" == job_name:
envs.append("OMP_NUM_THREADS=6")
envs.append("KMP_BLOCKTIME=0")
envs.append("MKL_ENABLE_INSTRUCTIONS=AVX2")
elif "evaluator" == job_name or "chief" == job_name:
envs.append("OMP_NUM_THREADS=1")
envs.append("KMP_BLOCKTIME=0")
envs.append("MKL_ENABLE_INSTRUCTIONS=AVX2")
else:
envs.append("OMP_NUM_THREADS=1")
envs.append("KMP_BLOCKTIME=0")
envs.append("MKL_ENABLE_INSTRUCTIONS=AVX2")
return envs
def set_network_threads(job_name):
envs = []
if "ps" == job_name:
envs.append("WORKER_DEFAULT_CORE_NUM=24")
elif "worker" == job_name:
envs.append("PS_DEFAULT_CORE_NUM=24")
return envs
if __name__ == "__main__":
print("start launching tensorflow job")
if "TF_WORKSPACE" not in os.environ:
print("TF_WORKSPACE env should be set.")
exit(1)
workspace = os.environ.get("TF_WORKSPACE", "")
if "TF_SCRIPT" not in os.environ:
print("TF_SCRIPT env should be set.")
exit(1)
tf_script = os.environ.get("TF_SCRIPT", "")
if "JEMALLOC_PATH" not in os.environ:
jemalloc_path = workspace
else:
jemalloc_path = os.environ.get("JEMALLOC_PATH", "")
#ret_code = copy_python_binary(workspace)
#if (ret_code != 0):
# exit(ret_code)
tf_args = sys.argv[1:]
tf_envs = []
#tf_envs.append("TF_SEASTAR_ENDPOINT_MAP_PATH=/tmp/")
if "TF_CONFIG" in os.environ:
ps_hosts, worker_hosts, chief_hosts, job_name, task_index = gen_cluster_info(workspace)
os.environ["TASK_INDEX"] = str(task_index)
os.environ["JOB_NAME"] = str(job_name)
#tf_envs.extend(set_mkl_envs(job_name))
set_jemalloc_version_cmd = set_jemalloc_version(jemalloc_path)
ret_code = pip_install_requirements(workspace)
if (ret_code != 0):
exit(ret_code)
ret_code = run_tensorflow_job(workspace, tf_script, tf_args, tf_envs, set_jemalloc_version_cmd)
if (ret_code != 0):
exit(ret_code) |
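An illustrative TF_CONFIG payload of the shape gen_cluster_info() above parses; hostnames and ports are made up for demonstration:
import json
import os

os.environ["TF_CONFIG"] = json.dumps({
    "cluster": {
        "ps": ["ps0.example.com:2222"],
        "worker": ["worker0.example.com:2222", "worker1.example.com:2222"],
        "chief": ["chief0.example.com:2222"],
    },
    "task": {"type": "worker", "index": 0},
})
# gen_cluster_info(workspace) would then return the ps/worker/chief host lists
# plus job_name="worker" and task_index=0, and write /tmp/.endpoint_map entries
# remapping each host to DEFAULT_SEASTAR_PORT.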
3 | asym enc | # Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from cryptography.exceptions import InvalidKey, InvalidSignature
from cryptography.hazmat.primitives import asymmetric, ciphers, hashes, padding
from cryptography.x509 import Certificate
HASH_LENGTH = 4 # Adjustable to avoid collision
NONCE_LENGTH = 16 # For AES, this is 128 bits (i.e. block size)
KEY_LENGTH = 32 # AES 256. Choose from 16, 24, 32
HEADER_LENGTH = HASH_LENGTH + NONCE_LENGTH
PADDING_LENGTH = NONCE_LENGTH * 8 # in bits
KEY_ENC_LENGTH = 256
SIGNATURE_LENGTH = 256
SIMPLE_HEADER_LENGTH = NONCE_LENGTH + KEY_ENC_LENGTH + SIGNATURE_LENGTH
def get_hash(value):
hash = hashes.Hash(hashes.SHA256())
hash.update(value)
return hash.finalize()
class SessionKeyUnavailable(Exception):
pass
class InvalidCertChain(Exception):
pass
def METHOD_NAME(k, m):
return k.encrypt(
m,
asymmetric.padding.OAEP(
mgf=asymmetric.padding.MGF1(algorithm=hashes.SHA256()), algorithm=hashes.SHA256(), label=None
),
)
def _asym_dec(k, m):
return k.decrypt(
m,
asymmetric.padding.OAEP(
mgf=asymmetric.padding.MGF1(algorithm=hashes.SHA256()), algorithm=hashes.SHA256(), label=None
),
)
def _sign(k, m):
return k.sign(
data=m,
padding=asymmetric.padding.PSS(
mgf=asymmetric.padding.MGF1(hashes.SHA256()),
salt_length=asymmetric.padding.PSS.MAX_LENGTH,
),
algorithm=hashes.SHA256(),
)
def _verify(k, m, s):
k.verify(
s,
m,
asymmetric.padding.PSS(
mgf=asymmetric.padding.MGF1(hashes.SHA256()), salt_length=asymmetric.padding.PSS.MAX_LENGTH
),
hashes.SHA256(),
)
def _sym_enc(k, n, m):
cipher = ciphers.Cipher(ciphers.algorithms.AES(k), ciphers.modes.CBC(n))
encryptor = cipher.encryptor()
padder = padding.PKCS7(PADDING_LENGTH).padder()
padded_data = padder.update(m) + padder.finalize()
return encryptor.update(padded_data) + encryptor.finalize()
def _sym_dec(k, n, m):
cipher = ciphers.Cipher(ciphers.algorithms.AES(k), ciphers.modes.CBC(n))
decryptor = cipher.decryptor()
plain_text = decryptor.update(m)
plain_text = plain_text + decryptor.finalize()
unpadder = padding.PKCS7(PADDING_LENGTH).unpadder()
return unpadder.update(plain_text) + unpadder.finalize()
class SessionKeyManager:
def __init__(self, root_ca):
self.key_hash_dict = dict()
self.root_ca = root_ca
self.root_ca_pub_key = root_ca.public_key()
def validate_cert_chain(self, cert):
self.root_ca_pub_key.verify(
cert.signature, cert.tbs_certificate_bytes, asymmetric.padding.PKCS1v15(), cert.signature_hash_algorithm
)
def key_request(self, remote_cert, local_cert, local_pri_key):
session_key = os.urandom(KEY_LENGTH)
signature = _sign(local_pri_key, session_key)
try:
self.validate_cert_chain(remote_cert)
except InvalidSignature:
return False
remote_pub_key = remote_cert.public_key()
key_enc = METHOD_NAME(remote_pub_key, session_key)
self.key_hash_dict[get_hash(session_key)[-HASH_LENGTH:]] = session_key
key_response = key_enc + signature
return key_response
def process_key_response(self, remote_cert, local_cert, local_pri_key, key_response):
key_enc, signature = key_response[:KEY_ENC_LENGTH], key_response[KEY_ENC_LENGTH:]
try:
session_key = _asym_dec(local_pri_key, key_enc)
self.validate_cert_chain(remote_cert)
public_key = remote_cert.public_key()
_verify(public_key, session_key, signature)
self.key_hash_dict[get_hash(session_key)[-HASH_LENGTH:]] = session_key
except (InvalidKey, InvalidSignature):
return False
return True
def key_available(self):
return bool(self.key_hash_dict)
def get_key(self, key_hash):
return self.key_hash_dict.get(key_hash)
def get_latest_key(self):
try:
k, last_value = _, self.key_hash_dict[k] = self.key_hash_dict.popitem()
except KeyError as e:
raise SessionKeyUnavailable("No session key established yet")
return last_value
class CellCipher:
def __init__(self, session_key_manager: SessionKeyManager):
self.session_key_manager = session_key_manager
def encrypt(self, message):
key = self.session_key_manager.get_latest_key()
key_hash = get_hash(key)
nonce = os.urandom(NONCE_LENGTH)
return nonce + key_hash[-HASH_LENGTH:] + _sym_enc(key, nonce, message)
def decrypt(self, message):
nonce, key_hash, message = (
message[:NONCE_LENGTH],
message[NONCE_LENGTH:HEADER_LENGTH],
message[HEADER_LENGTH:],
)
key = self.session_key_manager.get_key(key_hash)
if key is None:
raise SessionKeyUnavailable("No session key found for received message")
return _sym_dec(key, nonce, message)
class SimpleCellCipher:
def __init__(self, root_ca: Certificate, pri_key: asymmetric.rsa.RSAPrivateKey, cert: Certificate):
self._root_ca = root_ca
self._root_ca_pub_key = root_ca.public_key()
self._pri_key = pri_key
self._cert = cert
self._pub_key = cert.public_key()
self._validate_cert_chain(self._cert)
self._cached_enc = dict()
self._cached_dec = dict()
def _validate_cert_chain(self, cert: Certificate):
self._root_ca_pub_key.verify(
cert.signature, cert.tbs_certificate_bytes, asymmetric.padding.PKCS1v15(), cert.signature_hash_algorithm
)
def encrypt(self, message: bytes, target_cert: Certificate):
cert_hash = hash(target_cert)
secret = self._cached_enc.get(cert_hash)
if secret is None:
self._validate_cert_chain(target_cert)
key = os.urandom(KEY_LENGTH)
remote_pub_key = target_cert.public_key()
key_enc = METHOD_NAME(remote_pub_key, key)
signature = _sign(self._pri_key, key_enc)
self._cached_enc[cert_hash] = (key, key_enc, signature)
else:
(key, key_enc, signature) = secret
nonce = os.urandom(NONCE_LENGTH)
ct = nonce + key_enc + signature + _sym_enc(key, nonce, message)
return ct
def decrypt(self, message: bytes, origin_cert: Certificate):
nonce, key_enc, signature = (
message[:NONCE_LENGTH],
message[NONCE_LENGTH : NONCE_LENGTH + KEY_ENC_LENGTH],
message[NONCE_LENGTH + KEY_ENC_LENGTH : SIMPLE_HEADER_LENGTH],
)
key_hash = hash(key_enc)
dec = self._cached_dec.get(key_hash)
if dec is None:
self._validate_cert_chain(origin_cert)
public_key = origin_cert.public_key()
_verify(public_key, key_enc, signature)
key = _asym_dec(self._pri_key, key_enc)
self._cached_dec[key_hash] = key
else:
key = dec
return _sym_dec(key, nonce, message[SIMPLE_HEADER_LENGTH:]) |
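A minimal round-trip sketch for the AES-CBC helpers above (illustrative only; the SessionKeyManager and cell-cipher classes additionally require an RSA key pair and an X.509 certificate chain):
import os

key = os.urandom(KEY_LENGTH)      # 32-byte AES-256 key
nonce = os.urandom(NONCE_LENGTH)  # 16-byte IV
ciphertext = _sym_enc(key, nonce, b"hello cell")
assert _sym_dec(key, nonce, ciphertext) == b"hello cell"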
4 | test monitors pg | import os
import json
from bgpcfgd.template import TemplateFabric
from bgpcfgd.config import ConfigMgr
from .util import load_constants_dir_mappings
TEMPLATE_PATH = os.path.abspath('../../dockers/docker-fpm-frr/frr')
def load_tests(peer_type, template_name):
constants = load_constants_dir_mappings()
path = "tests/data/%s/%s" % (constants[peer_type], template_name)
param_files = [name for name in os.listdir(path)
if os.path.isfile(os.path.join(path, name)) and name.startswith("param_")]
tests = []
for param_fname in param_files:
casename = param_fname.replace("param_", "").replace(".json", "")
result_fname = "result_%s.conf" % casename
full_param_fname = os.path.join(path, param_fname)
full_result_fname = os.path.join(path, result_fname)
tests.append((casename, full_param_fname, full_result_fname))
tmpl_path = os.path.join("bgpd", "templates", constants[peer_type], "%s.j2" % template_name)
return tmpl_path, tests
def load_json(fname):
with open(fname) as param_fp:
raw_params = json.load(param_fp)
params = {}
for table_key, table_entries in raw_params.items():
if table_key.startswith("CONFIG_DB__"):
# convert CONFIG_DB__* entries keys into tuple if needed
new_table_entries = {}
for entry_key, entry_value in table_entries.items():
if '|' in entry_key:
new_key = tuple(entry_key.split('|'))
else:
new_key = entry_key
new_table_entries[new_key] = entry_value
params[table_key] = new_table_entries
else:
params[table_key] = table_entries
return params
def compress_comments(raw_config):
comment_counter = 0
output = []
for line in raw_config.split('\n'):
stripped_line = line.strip()
# Skip empty lines
if stripped_line == '':
pass
# Write lines without comments
elif not stripped_line.startswith('!'):
if comment_counter > 0:
output.append("!")
comment_counter = 0
output.append(line)
# Write non-empty comments
elif stripped_line.startswith('!') and len(stripped_line) > 1:
if comment_counter > 0:
output.append("!")
comment_counter = 0
output.append(line)
# Count empty comments
else: # stripped_line == '!'
comment_counter += 1
# Flush last comment if we have one
if comment_counter > 0:
output.append("!")
return "\n".join(output) + "\n"
def write_result(fname, raw_result):
with open(fname, 'w') as fp:
raw_result_w_commpressed_comments = compress_comments(raw_result)
fp.write(raw_result_w_commpressed_comments)
def run_tests(test_name, template_fname, tests):
tf = TemplateFabric(TEMPLATE_PATH)
template = tf.from_file(template_fname)
for case_name, param_fname, result_fname in tests:
params = load_json(param_fname)
raw_generated_result = str(template.render(params))
assert "None" not in raw_generated_result, "Test %s.%s" % (test_name, case_name)
# this is used only for initial generation write_result(result_fname, raw_generated_result)
canonical_generated_result = ConfigMgr.to_canonical(raw_generated_result)
with open(result_fname) as result_fp:
raw_saved_result = result_fp.read()
canonical_saved_result = ConfigMgr.to_canonical(raw_saved_result)
assert canonical_saved_result == canonical_generated_result, "Test %s.%s" % (test_name, case_name)
# Tests
def test_general_policies():
test_data = load_tests("general", "policies.conf")
run_tests("general_policies", *test_data)
def test_general_pg():
test_data = load_tests("general", "peer-group.conf")
run_tests("general_pg", *test_data)
def test_general_instance():
test_data = load_tests("general", "instance.conf")
run_tests("general_instance", *test_data)
def test_internal_policies():
test_data = load_tests("internal", "policies.conf")
run_tests("internal_policies", *test_data)
def test_internal_pg():
test_data = load_tests("internal", "peer-group.conf")
run_tests("internal_pg", *test_data)
def test_internal_instance():
test_data = load_tests("internal", "instance.conf")
run_tests("internal_instance", *test_data)
def test_dynamic_policies():
test_data = load_tests("dynamic", "policies.conf")
run_tests("dynamic_policies", *test_data)
def test_dynamic_pg():
test_data = load_tests("dynamic", "peer-group.conf")
run_tests("dynamic_pg", *test_data)
def test_dynamic_instance():
test_data = load_tests("dynamic", "instance.conf")
run_tests("dynamic_instance", *test_data)
def test_monitors_policies():
test_data = load_tests("monitors", "policies.conf")
run_tests("monitors_policies", *test_data)
def METHOD_NAME():
test_data = load_tests("monitors", "peer-group.conf")
run_tests("monitors_pg", *test_data)
def test_monitors_instance():
test_data = load_tests("monitors", "instance.conf")
run_tests("monitors_instance", *test_data)
def test_voq_chassis_policies():
test_data = load_tests("voq_chassis", "policies.conf")
run_tests("voq_chassis_policies", *test_data)
def test_voq_chassis_pg():
test_data = load_tests("voq_chassis", "peer-group.conf")
run_tests("voq_chassis_pg", *test_data)
def test_voq_chassis_instance():
test_data = load_tests("voq_chassis", "instance.conf")
run_tests("voq_chassis_instance", *test_data)
def test_sentinel_policies():
test_data = load_tests("sentinels", "policies.conf")
run_tests("sentinel_policies", *test_data)
def test_sentinel_pg():
test_data = load_tests("sentinels", "peer-group.conf")
run_tests("sentinel_pg", *test_data)
def test_sentinel_instance():
test_data = load_tests("sentinels", "instance.conf")
run_tests("sentinel_instance", *test_data) |
5 | webhook payment gateway initialize tokenization response | import json
import graphene
import mock
import pytest
from ....core.models import EventDelivery
from ....payment.interface import (
PaymentGatewayInitializeTokenizationRequestData,
PaymentGatewayInitializeTokenizationResponseData,
PaymentGatewayInitializeTokenizationResult,
)
from ....settings import WEBHOOK_SYNC_TIMEOUT
PAYMENT_GATEWAY_INITIALIZE_TOKENIZATION = """
subscription {
event {
... on PaymentGatewayInitializeTokenizationSession{
user{
id
}
channel{
id
}
data
}
}
}
"""
@pytest.fixture
def METHOD_NAME():
return {
"result": (
PaymentGatewayInitializeTokenizationResult.SUCCESSFULLY_INITIALIZED.name
),
"data": {"foo": "bar"},
}
@mock.patch("saleor.plugins.webhook.tasks.send_webhook_request_sync")
def test_payment_gateway_initialize_tokenization_with_static_payload(
mock_request,
customer_user,
webhook_plugin,
payment_gateway_initialize_tokenization_app,
METHOD_NAME,
channel_USD,
):
# given
mock_request.return_value = METHOD_NAME
plugin = webhook_plugin()
expected_data = {"foo": "bar"}
request_data = PaymentGatewayInitializeTokenizationRequestData(
user=customer_user,
app_identifier=payment_gateway_initialize_tokenization_app.identifier,
channel=channel_USD,
data=expected_data,
)
previous_value = PaymentGatewayInitializeTokenizationResponseData(
result=PaymentGatewayInitializeTokenizationResult.FAILED_TO_DELIVER,
error="Payment gateway initialize tokenization failed to deliver.",
data=None,
)
# when
response = plugin.payment_gateway_initialize_tokenization(
request_data, previous_value
)
# then
delivery = EventDelivery.objects.get()
assert json.loads(delivery.payload.payload) == {
"user_id": graphene.Node.to_global_id("User", customer_user.pk),
"channel_slug": channel_USD.slug,
"data": expected_data,
}
mock_request.assert_called_once_with(delivery, timeout=WEBHOOK_SYNC_TIMEOUT)
assert response == PaymentGatewayInitializeTokenizationResponseData(
result=PaymentGatewayInitializeTokenizationResult.SUCCESSFULLY_INITIALIZED,
error=None,
data=METHOD_NAME["data"],
)
@mock.patch("saleor.plugins.webhook.tasks.send_webhook_request_sync")
def test_payment_gateway_initialize_tokenization_with_subscription_payload(
mock_request,
customer_user,
webhook_plugin,
payment_gateway_initialize_tokenization_app,
METHOD_NAME,
channel_USD,
):
# given
mock_request.return_value = METHOD_NAME
webhook = payment_gateway_initialize_tokenization_app.webhooks.first()
webhook.subscription_query = PAYMENT_GATEWAY_INITIALIZE_TOKENIZATION
webhook.save()
plugin = webhook_plugin()
expected_data = {"foo": "bar"}
request_data = PaymentGatewayInitializeTokenizationRequestData(
user=customer_user,
app_identifier=payment_gateway_initialize_tokenization_app.identifier,
channel=channel_USD,
data=expected_data,
)
previous_value = PaymentGatewayInitializeTokenizationResponseData(
result=PaymentGatewayInitializeTokenizationResult.FAILED_TO_DELIVER,
error="Payment gateway initialize tokenization failed to deliver.",
data=None,
)
# when
response = plugin.payment_gateway_initialize_tokenization(
request_data, previous_value
)
# then
delivery = EventDelivery.objects.get()
assert json.loads(delivery.payload.payload) == {
"user": {"id": graphene.Node.to_global_id("User", customer_user.pk)},
"data": expected_data,
"channel": {"id": graphene.Node.to_global_id("Channel", channel_USD.pk)},
}
mock_request.assert_called_once_with(delivery, timeout=WEBHOOK_SYNC_TIMEOUT)
assert response == PaymentGatewayInitializeTokenizationResponseData(
result=PaymentGatewayInitializeTokenizationResult.SUCCESSFULLY_INITIALIZED,
error=None,
data=METHOD_NAME["data"],
)
@mock.patch("saleor.plugins.webhook.tasks.send_webhook_request_sync")
def test_payment_gateway_initialize_tokenization_missing_correct_response_from_webhook(
mock_request,
customer_user,
webhook_plugin,
payment_gateway_initialize_tokenization_app,
channel_USD,
):
# given
mock_request.return_value = None
webhook = payment_gateway_initialize_tokenization_app.webhooks.first()
webhook.subscription_query = PAYMENT_GATEWAY_INITIALIZE_TOKENIZATION
webhook.save()
plugin = webhook_plugin()
expected_data = {"foo": "bar"}
request_data = PaymentGatewayInitializeTokenizationRequestData(
user=customer_user,
app_identifier=payment_gateway_initialize_tokenization_app.identifier,
channel=channel_USD,
data=expected_data,
)
previous_value = PaymentGatewayInitializeTokenizationResponseData(
result=PaymentGatewayInitializeTokenizationResult.FAILED_TO_DELIVER,
error="Payment gateway initialize tokenization failed to deliver.",
data=None,
)
# when
response = plugin.payment_gateway_initialize_tokenization(
request_data, previous_value
)
# then
delivery = EventDelivery.objects.get()
mock_request.assert_called_once_with(delivery, timeout=WEBHOOK_SYNC_TIMEOUT)
assert response == PaymentGatewayInitializeTokenizationResponseData(
result=PaymentGatewayInitializeTokenizationResult.FAILED_TO_DELIVER,
error="Failed to delivery request.",
data=None,
)
@mock.patch("saleor.plugins.webhook.tasks.send_webhook_request_sync")
def test_payment_gateway_initialize_tokenization_failure_from_app(
mock_request,
customer_user,
webhook_plugin,
payment_gateway_initialize_tokenization_app,
channel_USD,
):
# given
expected_error_msg = "Expected error msg."
mock_request.return_value = {
"result": PaymentGatewayInitializeTokenizationResult.FAILED_TO_INITIALIZE.name,
"error": expected_error_msg,
"data": None,
}
plugin = webhook_plugin()
expected_data = {"foo": "bar"}
request_data = PaymentGatewayInitializeTokenizationRequestData(
user=customer_user,
app_identifier=payment_gateway_initialize_tokenization_app.identifier,
channel=channel_USD,
data=expected_data,
)
previous_value = PaymentGatewayInitializeTokenizationResponseData(
result=PaymentGatewayInitializeTokenizationResult.FAILED_TO_DELIVER,
error="Payment gateway initialize tokenization failed to deliver.",
data=None,
)
# when
response = plugin.payment_gateway_initialize_tokenization(
request_data, previous_value
)
# then
delivery = EventDelivery.objects.get()
assert json.loads(delivery.payload.payload) == {
"user_id": graphene.Node.to_global_id("User", customer_user.pk),
"channel_slug": channel_USD.slug,
"data": expected_data,
}
mock_request.assert_called_once_with(delivery, timeout=WEBHOOK_SYNC_TIMEOUT)
assert response == PaymentGatewayInitializeTokenizationResponseData(
result=PaymentGatewayInitializeTokenizationResult.FAILED_TO_INITIALIZE,
error=expected_error_msg,
data=None,
) |
6 | test mobile get unauthenticated user | """
Tests for reset deadlines endpoint.
"""
import datetime
import ddt
from django.urls import reverse
from django.utils import timezone
from edx_toggles.toggles.testutils import override_waffle_flag
from common.djangoapps.course_modes.models import CourseMode
from common.djangoapps.student.models import CourseEnrollment
from common.djangoapps.util.testing import EventTestMixin
from lms.djangoapps.course_home_api.tests.utils import BaseCourseHomeTests
from lms.djangoapps.courseware.tests.helpers import MasqueradeMixin
from openedx.core.djangoapps.schedules.models import Schedule
from openedx.features.course_experience import RELATIVE_DATES_DISABLE_RESET_FLAG, RELATIVE_DATES_FLAG
from xmodule.modulestore.tests.factories import CourseFactory
@ddt.ddt
class ResetCourseDeadlinesViewTests(EventTestMixin, BaseCourseHomeTests, MasqueradeMixin):
"""
Tests for reset deadlines endpoint.
"""
def setUp(self): # pylint: disable=arguments-differ
# Need to supply tracker name for the EventTestMixin. Also, EventTestMixin needs to come
# first in class inheritance so the setUp call here appropriately works
super().setUp('openedx.features.course_experience.api.v1.views.tracker')
self.course = CourseFactory.create(self_paced=True, start=timezone.now() - datetime.timedelta(days=1000))
def test_reset_deadlines(self):
enrollment = CourseEnrollment.enroll(self.user, self.course.id, CourseMode.VERIFIED)
enrollment.schedule.start_date = timezone.now() - datetime.timedelta(days=100)
enrollment.schedule.save()
# Test body with incorrect body param (course_key is required)
response = self.client.post(reverse('course-experience-reset-course-deadlines'), {'course': self.course.id})
assert response.status_code == 400
assert enrollment.schedule == Schedule.objects.get(id=enrollment.schedule.id)
self.assert_no_events_were_emitted()
# Test correct post body
response = self.client.post(reverse('course-experience-reset-course-deadlines'), {'course_key': self.course.id})
assert response.status_code == 200
assert enrollment.schedule.start_date < Schedule.objects.get(id=enrollment.schedule.id).start_date
self.assert_event_emitted(
'edx.ui.lms.reset_deadlines.clicked',
courserun_key=str(self.course.id),
is_masquerading=False,
is_staff=False,
org_key=self.course.org,
user_id=self.user.id,
)
@override_waffle_flag(RELATIVE_DATES_FLAG, active=True)
@override_waffle_flag(RELATIVE_DATES_DISABLE_RESET_FLAG, active=True)
def test_reset_deadlines_disabled(self):
enrollment = CourseEnrollment.enroll(self.user, self.course.id, CourseMode.VERIFIED)
enrollment.schedule.start_date = timezone.now() - datetime.timedelta(days=100)
enrollment.schedule.save()
response = self.client.post(reverse('course-experience-reset-course-deadlines'), {'course_key': self.course.id})
assert response.status_code == 200
assert enrollment.schedule == Schedule.objects.get(id=enrollment.schedule.id)
self.assert_no_events_were_emitted()
def test_reset_deadlines_with_masquerade(self):
""" Staff users should be able to masquerade as a learner and reset the learner's schedule """
student_username = self.user.username
student_user_id = self.user.id
student_enrollment = CourseEnrollment.enroll(self.user, self.course.id)
student_enrollment.schedule.start_date = timezone.now() - datetime.timedelta(days=100)
student_enrollment.schedule.save()
staff_enrollment = CourseEnrollment.enroll(self.staff_user, self.course.id)
staff_enrollment.schedule.start_date = timezone.now() - datetime.timedelta(days=30)
staff_enrollment.schedule.save()
self.switch_to_staff()
self.update_masquerade(course=self.course, username=student_username)
self.client.post(reverse('course-experience-reset-course-deadlines'), {'course_key': self.course.id})
updated_schedule = Schedule.objects.get(id=student_enrollment.schedule.id)
assert updated_schedule.start_date.date() == datetime.datetime.today().date()
updated_staff_schedule = Schedule.objects.get(id=staff_enrollment.schedule.id)
assert updated_staff_schedule.start_date == staff_enrollment.schedule.start_date
self.assert_event_emitted(
'edx.ui.lms.reset_deadlines.clicked',
courserun_key=str(self.course.id),
is_masquerading=True,
is_staff=False,
org_key=self.course.org,
user_id=student_user_id,
)
def test_post_unauthenticated_user(self):
self.client.logout()
response = self.client.post(reverse('course-experience-reset-course-deadlines'), {'course_key': self.course.id})
assert response.status_code == 401
def test_mobile_get_banner_info(self):
response = self.client.get(reverse('course-experience-course-deadlines-mobile', args=[self.course.id]))
assert response.status_code == 200
self.assertContains(response, 'missed_deadlines')
self.assertContains(response, 'missed_gated_content')
self.assertContains(response, 'content_type_gating_enabled')
self.assertContains(response, 'verified_upgrade_link')
def test_mobile_get_unknown_course(self):
url = reverse('course-experience-course-deadlines-mobile', args=['course-v1:unknown+course+2T2020'])
response = self.client.get(url)
assert response.status_code == 404
def METHOD_NAME(self):
self.client.logout()
response = self.client.get(reverse('course-experience-course-deadlines-mobile', args=[self.course.id]))
assert response.status_code == 401 |
7 | run test | #
# This script needs to be run on startup
# qemu -kernel ${KERNEL} -s -S
# and then:
# gdb ${KERNEL}.vmlinux -x ${QEMU_SRC}/tests/guest-debug/test-gdbstub.py
import sys
import gdb
failcount = 0
def report(cond, msg):
"Report success/fail of test"
if cond:
print ("PASS: %s" % (msg))
else:
print ("FAIL: %s" % (msg))
global failcount
failcount += 1
def check_step():
"Step an instruction, check it moved."
start_pc = gdb.parse_and_eval('$pc')
gdb.execute("si")
end_pc = gdb.parse_and_eval('$pc')
return not (start_pc == end_pc)
def check_break(sym_name):
"Setup breakpoint, continue and check we stopped."
sym, ok = gdb.lookup_symbol(sym_name)
bp = gdb.Breakpoint(sym_name)
gdb.execute("c")
# hopefully we came back
end_pc = gdb.parse_and_eval('$pc')
print ("%s == %s %d" % (end_pc, sym.value(), bp.hit_count))
bp.delete()
# can we test we hit bp?
return end_pc == sym.value()
# We need to do hbreak manually as the python interface doesn't export it
def check_hbreak(sym_name):
"Setup hardware breakpoint, continue and check we stopped."
sym, ok = gdb.lookup_symbol(sym_name)
gdb.execute("hbreak %s" % (sym_name))
gdb.execute("c")
# hopefully we came back
end_pc = gdb.parse_and_eval('$pc')
print ("%s == %s" % (end_pc, sym.value()))
if end_pc == sym.value():
gdb.execute("d 1")
return True
else:
return False
class WatchPoint(gdb.Breakpoint):
def get_wpstr(self, sym_name):
"Setup sym and wp_str for given symbol."
self.sym, ok = gdb.lookup_symbol(sym_name)
wp_addr = gdb.parse_and_eval(sym_name).address
self.wp_str = '*(%(type)s)(&%(address)s)' % dict(
type = wp_addr.type, address = sym_name)
return(self.wp_str)
def __init__(self, sym_name, type):
wp_str = self.get_wpstr(sym_name)
super(WatchPoint, self).__init__(wp_str, gdb.BP_WATCHPOINT, type)
def stop(self):
end_pc = gdb.parse_and_eval('$pc')
print ("HIT WP @ %s" % (end_pc))
return True
def do_one_watch(sym, wtype, text):
wp = WatchPoint(sym, wtype)
gdb.execute("c")
report_str = "%s for %s (%s)" % (text, sym, wp.sym.value())
if wp.hit_count > 0:
report(True, report_str)
wp.delete()
else:
report(False, report_str)
def check_watches(sym_name):
"Watch a symbol for any access."
# Should hit for any read
do_one_watch(sym_name, gdb.WP_ACCESS, "awatch")
# Again should hit for reads
do_one_watch(sym_name, gdb.WP_READ, "rwatch")
# Finally when it is written
do_one_watch(sym_name, gdb.WP_WRITE, "watch")
class CatchBreakpoint(gdb.Breakpoint):
def __init__(self, sym_name):
super(CatchBreakpoint, self).__init__(sym_name)
self.sym, ok = gdb.lookup_symbol(sym_name)
def stop(self):
end_pc = gdb.parse_and_eval('$pc')
print ("CB: %s == %s" % (end_pc, self.sym.value()))
if end_pc == self.sym.value():
report(False, "Hit final catchpoint")
def METHOD_NAME():
"Run through the tests one by one"
print ("Checking we can step the first few instructions")
step_ok = 0
for i in range(3):
if check_step():
step_ok += 1
report(step_ok == 3, "single step in boot code")
print ("Checking HW breakpoint works")
break_ok = check_hbreak("kernel_init")
report(break_ok, "hbreak @ kernel_init")
# Can't set this up until we are in the kernel proper
# if we make it to run_init_process we've over-run and
# one of the tests failed
print ("Setup catch-all for run_init_process")
cbp = CatchBreakpoint("run_init_process")
cpb2 = CatchBreakpoint("try_to_run_init_process")
print ("Checking Normal breakpoint works")
break_ok = check_break("wait_for_completion")
report(break_ok, "break @ wait_for_completion")
print ("Checking watchpoint works")
check_watches("system_state")
#
# This runs as the script it sourced (via -x)
#
try:
print ("Connecting to remote")
gdb.execute("target remote localhost:1234")
# These are not very useful in scripts
gdb.execute("set pagination off")
gdb.execute("set confirm off")
# Run the actual tests
METHOD_NAME()
except:
print ("GDB Exception: %s" % (sys.exc_info()[0]))
failcount += 1
import code
code.InteractiveConsole(locals=globals()).interact()
raise
# Finally kill the inferior and exit gdb with a count of failures
gdb.execute("kill")
exit(failcount) |
8 | fp16 to fp32 | #!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch
logger: logging.Logger = logging.getLogger()
try:
# pyre-ignore[21]
from fbgemm_gpu import open_source # noqa: F401
except Exception:
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops")
torch.ops.load_library("//deeplearning/fbgemm/fbgemm_gpu:sparse_ops_cpu")
TORCH_HALF_MIN: float = torch.finfo(torch.float16).min
TORCH_HALF_MAX: float = torch.finfo(torch.float16).max
TORCH_BFLOAT16_MIN: float = torch.finfo(torch.bfloat16).min
TORCH_BFLOAT16_MAX: float = torch.finfo(torch.bfloat16).max
def fp32_to_fp16_with_clamp(tensor: torch.Tensor) -> torch.Tensor:
return torch.clamp(tensor, TORCH_HALF_MIN, TORCH_HALF_MAX).half()
def fp32_to_bf16_with_clamp(tensor: torch.Tensor) -> torch.Tensor:
return torch.clamp(tensor, TORCH_BFLOAT16_MIN, TORCH_BFLOAT16_MAX).bfloat16()
def fp32_to_hfp8_with_clamp(
tensor: torch.Tensor, ebits: int = 4, mbits: int = 3, bias: int = 15
) -> torch.Tensor:
max_pos: float = (2 ** ((1 << ebits) - 2 - bias)) * (2 - 2 ** (-mbits))
return torch.ops.fbgemm.FloatToHFP8Quantized(
tensor.contiguous(),
ebits,
bias,
max_pos,
)
def METHOD_NAME(tensor: torch.Tensor) -> torch.Tensor:
return tensor.float()
def bf16_to_fp32(tensor: torch.Tensor) -> torch.Tensor:
return tensor.view(torch.bfloat16).float()
def hfp8_to_fp32(tensor: torch.Tensor, ebits: int = 4, bias: int = 15) -> torch.Tensor:
return torch.ops.fbgemm.HFP8QuantizedToFloat(
tensor.contiguous().view(torch.uint8),
ebits,
bias,
)
def measure_fp16_quant_error(input_tensor: torch.Tensor) -> None:
# TODO: log to tensorboard
num_nan_fp32_tensor = torch.numel(input_tensor[torch.isnan(input_tensor)])
logger.info(
"num NaN in fp32 tensor: {}, ratio: {}.".format(
num_nan_fp32_tensor, num_nan_fp32_tensor / torch.numel(input_tensor)
)
)
logger.info(
"fp32 tensor profile: min: {}, max: {}, min abs:{}, max abs:{}.".format(
torch.min(input_tensor),
torch.max(input_tensor),
torch.min(torch.abs(input_tensor)),
torch.max(torch.abs(input_tensor)),
)
)
fp16_tensor = fp32_to_fp16_with_clamp(input_tensor)
num_nan_fp16_tensor = torch.numel(fp16_tensor[torch.isnan(fp16_tensor)])
logger.info(
"num NaN in fp16 tensor: {}, ratio: {}.".format(
num_nan_fp16_tensor, num_nan_fp16_tensor / torch.numel(input_tensor)
)
)
diff = torch.abs(input_tensor - fp16_tensor.float())
rel_diff = diff / torch.abs(input_tensor)
logger.info(
"fp32_to_fp16 abs error: min={}, max={}, avg={}.".format(
torch.min(diff), torch.max(diff), torch.mean(diff)
)
)
rel_diff_not_nan = rel_diff[torch.logical_not(torch.isnan(rel_diff))]
logger.info(
"fp32_to_fp16 rel error: min={}, max={}, avg={}.".format(
torch.min(rel_diff_not_nan),
torch.max(rel_diff_not_nan),
torch.mean(rel_diff_not_nan),
)
)
rel_diff_1_idx = torch.where(rel_diff == 1.0)
fp32_rel_err_1_vals = input_tensor[rel_diff_1_idx]
if torch.numel(fp32_rel_err_1_vals) > 0:
fp32_rel_err_1_vals = torch.abs(fp32_rel_err_1_vals)
logger.info(
"fp32_to_fp16 rel error == 1: fp32 min:{}, fp32 max:{}, fp32 avg:{}.".format(
torch.min(fp32_rel_err_1_vals),
torch.max(fp32_rel_err_1_vals),
torch.mean(fp32_rel_err_1_vals),
)
)
subrange_ratio = torch.numel(fp16_tensor[rel_diff_1_idx]) / torch.numel(
fp16_tensor
)
logger.info("sub fp16 range ratio: {}".format(subrange_ratio)) |
9 | get six digit naics count | from rest_framework.request import Request
from rest_framework.response import Response
from rest_framework.views import APIView
from django.db.models.functions import Length
from django.db.models import Q
from usaspending_api.common.cache_decorator import cache_response
from usaspending_api.common.validator.tinyshield import TinyShield
from usaspending_api.references.models import NAICS
from usaspending_api.references.v2.views.filter_tree.filter_tree import DEFAULT_CHILDREN
class NAICSViewSet(APIView):
"""Return a list of NAICS or a filtered list of NAICS"""
endpoint_doc = "usaspending_api/api_contracts/contracts/v2/references/naics.md"
naics_queryset = NAICS.objects.annotate(text_len=Length("code"))
def METHOD_NAME(self, code: str) -> int:
return self.naics_queryset.filter(code__startswith=code, text_len=6).count()
def _parse_and_validate_request(self, requested_naics: str, request_data) -> dict:
data = {"code": requested_naics, "filter": request_data.get("filter")}
models = [
{"key": "code", "name": "code", "type": "integer", "allow_nulls": True, "optional": True},
{
"key": "filter",
"name": "filter",
"type": "text",
"text_type": "search",
"default": None,
"optional": True,
"allow_nulls": True,
},
]
return TinyShield(models).block(data)
def _fetch_children(self, naics_code) -> list:
length = len(naics_code) + 2
results = [
{
"naics": naics.code,
"naics_description": naics.description,
"count": self.METHOD_NAME(naics.code) if len(naics.code) < 6 else DEFAULT_CHILDREN,
}
for naics in self.naics_queryset.filter(code__startswith=naics_code, text_len=length)
]
return sorted(results, key=lambda x: x["naics"])
def _filter_search(self, naics_filter: dict) -> dict:
search_filter = Q(description__icontains=naics_filter["description__icontains"])
search_filter |= Q(code__icontains=naics_filter["description__icontains"])
if naics_filter.get("code"):
search_filter &= Q(code__startswith=naics_filter["code"])
tier1_codes = set()
tier2_codes = set()
tier3_codes = set()
naics_list = list(self.naics_queryset.filter(search_filter))
tier3_naics = [naics for naics in naics_list if naics.text_len == 6]
tier2_naics = [naics for naics in naics_list if naics.text_len == 4]
tier1_naics = [naics for naics in naics_list if naics.text_len == 2]
for naics in tier3_naics:
tier3_codes.add(naics.code)
tier2_codes.add(naics.code[:4])
tier1_codes.add(naics.code[:2])
for naics in tier2_naics:
tier2_codes.add(naics.code)
tier1_codes.add(naics.code[:2])
extra_tier2_naics = self.naics_queryset.filter(code__in=tier2_codes, text_len=4)
extra_tier1_naics = self.naics_queryset.filter(code__in=tier1_codes, text_len=2)
tier2 = set(list(tier2_naics)) | set(list(extra_tier2_naics))
tier1 = set(list(tier1_naics)) | set(list(extra_tier1_naics))
tier2_results = {}
for naics in tier2:
result = {
"naics": naics.code,
"naics_description": naics.description,
"count": self.METHOD_NAME(naics.code),
"children": [],
}
tier2_results[naics.code] = result
for naics in tier3_naics:
result = {
"naics": naics.code,
"naics_description": naics.description,
"count": DEFAULT_CHILDREN,
}
tier2_results[naics.code[:4]]["children"].append(result)
tier2_results[naics.code[:4]]["children"].sort(key=lambda x: x["naics"])
tier1_results = {}
for naics in tier1:
result = {
"naics": naics.code,
"naics_description": naics.description,
"count": self.METHOD_NAME(naics.code),
"children": [],
}
tier1_results[naics.code] = result
for key in tier2_results.keys():
tier1_results[key[:2]]["children"].append(tier2_results[key])
tier1_results[key[:2]]["children"].sort(key=lambda x: x["naics"])
results = [tier1_results[key] for key in tier1_results.keys()]
return {"results": sorted(results, key=lambda x: x["naics"])}
def _default_view(self) -> dict:
results = [
{
"naics": naics.code,
"naics_description": naics.description,
"count": self.METHOD_NAME(naics.code),
}
for naics in self.naics_queryset.filter(text_len=2)
]
return {"results": sorted(results, key=lambda x: x["naics"])}
def _business_logic(self, request_data: dict) -> dict:
naics_filter = {}
code = request_data.get("code")
description = request_data.get("filter")
if not code and not description:
return self._default_view()
if code:
naics_filter.update({"code": code})
if description:
naics_filter.update({"description__icontains": description})
return self._filter_search(naics_filter)
results = []
for naics in self.naics_queryset.filter(Q(**naics_filter)):
if naics.text_len < 6:
result = {
"naics": naics.code,
"naics_description": naics.description,
"count": self.METHOD_NAME(naics.code),
"children": self._fetch_children(naics.code),
}
else:
result = {
"naics": naics.code,
"naics_description": naics.description,
"count": DEFAULT_CHILDREN,
}
results.append(result)
return {"results": results}
@cache_response()
def get(self, request: Request, requested_naics: str = None) -> Response:
request_data = self._parse_and_validate_request(requested_naics, request.GET)
results = self._business_logic(request_data)
return Response(results) |
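An illustrative response shape for the default (unfiltered) request handled above; the structure follows _default_view() directly, the descriptions use real NAICS sector names, and the counts are hypothetical:
example_default_response = {
    "results": [
        {"naics": "11", "naics_description": "Agriculture, Forestry, Fishing and Hunting", "count": 64},
        {"naics": "21", "naics_description": "Mining, Quarrying, and Oil and Gas Extraction", "count": 28},
    ]
}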
10 | test ties broken alphabetically | # These tests are auto-generated with test data from:
# https://github.com/exercism/problem-specifications/tree/main/exercises/tournament/canonical-data.json
# File last updated on 2023-07-19
import unittest
from tournament import (
tally,
)
class TournamentTest(unittest.TestCase):
def test_just_the_header_if_no_input(self):
results = []
table = ["Team | MP | W | D | L | P"]
self.assertEqual(tally(results), table)
def test_a_win_is_three_points_a_loss_is_zero_points(self):
results = ["Allegoric Alaskans;Blithering Badgers;win"]
table = [
"Team | MP | W | D | L | P",
"Allegoric Alaskans | 1 | 1 | 0 | 0 | 3",
"Blithering Badgers | 1 | 0 | 0 | 1 | 0",
]
self.assertEqual(tally(results), table)
def test_a_win_can_also_be_expressed_as_a_loss(self):
results = ["Blithering Badgers;Allegoric Alaskans;loss"]
table = [
"Team | MP | W | D | L | P",
"Allegoric Alaskans | 1 | 1 | 0 | 0 | 3",
"Blithering Badgers | 1 | 0 | 0 | 1 | 0",
]
self.assertEqual(tally(results), table)
def test_a_different_team_can_win(self):
results = ["Blithering Badgers;Allegoric Alaskans;win"]
table = [
"Team | MP | W | D | L | P",
"Blithering Badgers | 1 | 1 | 0 | 0 | 3",
"Allegoric Alaskans | 1 | 0 | 0 | 1 | 0",
]
self.assertEqual(tally(results), table)
def test_a_draw_is_one_point_each(self):
results = ["Allegoric Alaskans;Blithering Badgers;draw"]
table = [
"Team | MP | W | D | L | P",
"Allegoric Alaskans | 1 | 0 | 1 | 0 | 1",
"Blithering Badgers | 1 | 0 | 1 | 0 | 1",
]
self.assertEqual(tally(results), table)
def test_there_can_be_more_than_one_match(self):
results = [
"Allegoric Alaskans;Blithering Badgers;win",
"Allegoric Alaskans;Blithering Badgers;win",
]
table = [
"Team | MP | W | D | L | P",
"Allegoric Alaskans | 2 | 2 | 0 | 0 | 6",
"Blithering Badgers | 2 | 0 | 0 | 2 | 0",
]
self.assertEqual(tally(results), table)
def test_there_can_be_more_than_one_winner(self):
results = [
"Allegoric Alaskans;Blithering Badgers;loss",
"Allegoric Alaskans;Blithering Badgers;win",
]
table = [
"Team | MP | W | D | L | P",
"Allegoric Alaskans | 2 | 1 | 0 | 1 | 3",
"Blithering Badgers | 2 | 1 | 0 | 1 | 3",
]
self.assertEqual(tally(results), table)
def test_there_can_be_more_than_two_teams(self):
results = [
"Allegoric Alaskans;Blithering Badgers;win",
"Blithering Badgers;Courageous Californians;win",
"Courageous Californians;Allegoric Alaskans;loss",
]
table = [
"Team | MP | W | D | L | P",
"Allegoric Alaskans | 2 | 2 | 0 | 0 | 6",
"Blithering Badgers | 2 | 1 | 0 | 1 | 3",
"Courageous Californians | 2 | 0 | 0 | 2 | 0",
]
self.assertEqual(tally(results), table)
def test_typical_input(self):
results = [
"Allegoric Alaskans;Blithering Badgers;win",
"Devastating Donkeys;Courageous Californians;draw",
"Devastating Donkeys;Allegoric Alaskans;win",
"Courageous Californians;Blithering Badgers;loss",
"Blithering Badgers;Devastating Donkeys;loss",
"Allegoric Alaskans;Courageous Californians;win",
]
table = [
"Team | MP | W | D | L | P",
"Devastating Donkeys | 3 | 2 | 1 | 0 | 7",
"Allegoric Alaskans | 3 | 2 | 0 | 1 | 6",
"Blithering Badgers | 3 | 1 | 0 | 2 | 3",
"Courageous Californians | 3 | 0 | 1 | 2 | 1",
]
self.assertEqual(tally(results), table)
def test_incomplete_competition_not_all_pairs_have_played(self):
results = [
"Allegoric Alaskans;Blithering Badgers;loss",
"Devastating Donkeys;Allegoric Alaskans;loss",
"Courageous Californians;Blithering Badgers;draw",
"Allegoric Alaskans;Courageous Californians;win",
]
table = [
"Team | MP | W | D | L | P",
"Allegoric Alaskans | 3 | 2 | 0 | 1 | 6",
"Blithering Badgers | 2 | 1 | 1 | 0 | 4",
"Courageous Californians | 2 | 0 | 1 | 1 | 1",
"Devastating Donkeys | 1 | 0 | 0 | 1 | 0",
]
self.assertEqual(tally(results), table)
def METHOD_NAME(self):
results = [
"Courageous Californians;Devastating Donkeys;win",
"Allegoric Alaskans;Blithering Badgers;win",
"Devastating Donkeys;Allegoric Alaskans;loss",
"Courageous Californians;Blithering Badgers;win",
"Blithering Badgers;Devastating Donkeys;draw",
"Allegoric Alaskans;Courageous Californians;draw",
]
table = [
"Team | MP | W | D | L | P",
"Allegoric Alaskans | 3 | 2 | 1 | 0 | 7",
"Courageous Californians | 3 | 2 | 1 | 0 | 7",
"Blithering Badgers | 3 | 0 | 1 | 2 | 1",
"Devastating Donkeys | 3 | 0 | 1 | 2 | 1",
]
self.assertEqual(tally(results), table)
def test_ensure_points_sorted_numerically(self):
results = [
"Devastating Donkeys;Blithering Badgers;win",
"Devastating Donkeys;Blithering Badgers;win",
"Devastating Donkeys;Blithering Badgers;win",
"Devastating Donkeys;Blithering Badgers;win",
"Blithering Badgers;Devastating Donkeys;win",
]
table = [
"Team | MP | W | D | L | P",
"Devastating Donkeys | 5 | 4 | 0 | 1 | 12",
"Blithering Badgers | 5 | 1 | 0 | 4 | 3",
]
self.assertEqual(tally(results), table) |
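A minimal tally() sketch consistent with the scoring rules these tests encode (win = 3 points, draw = 1 point each, loss = 0; rows ordered by points descending, ties broken alphabetically). It is not the reference solution, and it mirrors the single-space column layout of the strings as they appear above rather than the fixed-width padding of the original exercise:
from collections import defaultdict

def tally_sketch(results):
    stats = defaultdict(lambda: [0, 0, 0, 0, 0])  # MP, W, D, L, P
    for line in results:
        home, away, outcome = line.split(";")
        stats[home][0] += 1
        stats[away][0] += 1
        if outcome == "win":      # home team won
            stats[home][1] += 1; stats[home][4] += 3; stats[away][3] += 1
        elif outcome == "loss":   # home team lost
            stats[away][1] += 1; stats[away][4] += 3; stats[home][3] += 1
        else:                     # draw
            stats[home][2] += 1; stats[home][4] += 1
            stats[away][2] += 1; stats[away][4] += 1
    table = ["Team | MP | W | D | L | P"]
    for team, (mp, w, d, l, p) in sorted(stats.items(), key=lambda kv: (-kv[1][4], kv[0])):
        table.append(f"{team} | {mp} | {w} | {d} | {l} | {p}")
    return table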
11 | get severity level | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import logging
from typing import Sequence, Any
from opentelemetry._logs.severity import SeverityNumber
from opentelemetry.semconv.trace import SpanAttributes
from opentelemetry.sdk._logs import LogData
from opentelemetry.sdk._logs.export import LogExporter, LogExportResult
from azure.monitor.opentelemetry.exporter import _utils
from azure.monitor.opentelemetry.exporter._constants import (
_EXCEPTION_ENVELOPE_NAME,
_MESSAGE_ENVELOPE_NAME,
)
from azure.monitor.opentelemetry.exporter._generated.models import (
MessageData,
MonitorBase,
TelemetryEventData,
TelemetryExceptionData,
TelemetryExceptionDetails,
TelemetryItem,
)
from azure.monitor.opentelemetry.exporter.export._base import (
BaseExporter,
ExportResult,
)
_logger = logging.getLogger(__name__)
_DEFAULT_SPAN_ID = 0
_DEFAULT_TRACE_ID = 0
__all__ = ["AzureMonitorLogExporter"]
_APPLICATION_INSIGHTS_EVENT_MARKER_ATTRIBUTE = "APPLICATION_INSIGHTS_EVENT_MARKER_ATTRIBUTE"
class AzureMonitorLogExporter(BaseExporter, LogExporter):
"""Azure Monitor Log exporter for OpenTelemetry."""
def export(
self, batch: Sequence[LogData], **kwargs: Any # pylint: disable=unused-argument
) -> LogExportResult:
"""Export log data.
:param batch: OpenTelemetry LogData(s) to export.
:type batch: ~typing.Sequence[~opentelemetry._logs.LogData]
:return: The result of the export.
:rtype: ~opentelemetry.sdk._logs.export.LogExportResult
"""
envelopes = [self._log_to_envelope(log) for log in batch]
try:
result = self._transmit(envelopes)
self._handle_transmit_from_storage(envelopes, result)
return _get_log_export_result(result)
except Exception: # pylint: disable=broad-except
_logger.exception("Exception occurred while exporting the data.")
return _get_log_export_result(ExportResult.FAILED_NOT_RETRYABLE)
def shutdown(self) -> None:
"""Shuts down the exporter.
Called when the SDK is shut down.
"""
self.storage.close()
def _log_to_envelope(self, log_data: LogData) -> TelemetryItem:
if not log_data:
return None
envelope = _convert_log_to_envelope(log_data)
envelope.instrumentation_key = self._instrumentation_key
return envelope
@classmethod
def from_connection_string(
cls, conn_str: str, **kwargs: Any
) -> "AzureMonitorLogExporter":
"""
Create an AzureMonitorLogExporter from a connection string.
This is the recommended way of instantiation if a connection string is passed in explicitly.
If a user wants to use a connection string provided by environment variable, the constructor
of the exporter can be called directly.
:param str conn_str: The connection string to be used for authentication.
:keyword str api_version: The service API version used. Defaults to latest.
:returns: An instance of ~AzureMonitorLogExporter
:rtype: ~azure.monitor.opentelemetry.exporter.AzureMonitorLogExporter
"""
return cls(connection_string=conn_str, **kwargs)
def _log_data_is_event(log_data: LogData):
log_record = log_data.log_record
is_event = log_record.attributes.get(_APPLICATION_INSIGHTS_EVENT_MARKER_ATTRIBUTE)
return is_event is True
# pylint: disable=protected-access
def _convert_log_to_envelope(log_data: LogData) -> TelemetryItem:
log_record = log_data.log_record
time_stamp = log_record.timestamp if log_record.timestamp is not None else log_record.observed_timestamp
envelope = _utils._create_telemetry_item(time_stamp)
envelope.tags.update(_utils._populate_part_a_fields(log_record.resource))
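# Zero-padded hex rendering: 32 chars for the trace id, 16 for the span id (see the format specifiers below).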
envelope.tags["ai.operation.id"] = "{:032x}".format(
log_record.trace_id or _DEFAULT_TRACE_ID
)
envelope.tags["ai.operation.parentId"] = "{:016x}".format(
log_record.span_id or _DEFAULT_SPAN_ID
)
properties = _utils._filter_custom_properties(
log_record.attributes,
lambda key, val: not _is_ignored_attribute(key)
)
exc_type = log_record.attributes.get(SpanAttributes.EXCEPTION_TYPE)
exc_message = log_record.attributes.get(SpanAttributes.EXCEPTION_MESSAGE)
# pylint: disable=line-too-long
stack_trace = log_record.attributes.get(SpanAttributes.EXCEPTION_STACKTRACE)
severity_level = METHOD_NAME(log_record.severity_number)
if not log_record.body:
log_record.body = "n/a"
# Event telemetry
if _log_data_is_event(log_data):
envelope.name = 'Microsoft.ApplicationInsights.Event'
data = TelemetryEventData(
name=str(log_record.body)[:32768],
properties=properties,
)
envelope.data = MonitorBase(base_data=data, base_type="EventData")
# Exception telemetry
elif exc_type is not None or exc_message is not None:
envelope.name = _EXCEPTION_ENVELOPE_NAME
has_full_stack = stack_trace is not None
if not exc_message:
exc_message = "Exception"
exc_details = TelemetryExceptionDetails(
type_name=str(exc_type)[:1024],
message=str(exc_message)[:32768],
has_full_stack=has_full_stack,
stack=str(stack_trace)[:32768],
)
data = TelemetryExceptionData(
severity_level=severity_level,
properties=properties,
exceptions=[exc_details],
)
# pylint: disable=line-too-long
envelope.data = MonitorBase(base_data=data, base_type="ExceptionData")
else: # Message telemetry
envelope.name = _MESSAGE_ENVELOPE_NAME
# pylint: disable=line-too-long
# Severity number: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md#field-severitynumber
data = MessageData(
message=str(log_record.body)[:32768],
severity_level=severity_level,
properties=properties,
)
envelope.data = MonitorBase(base_data=data, base_type="MessageData")
return envelope
def _get_log_export_result(result: ExportResult) -> LogExportResult:
if result == ExportResult.SUCCESS:
return LogExportResult.SUCCESS
if result in (
ExportResult.FAILED_RETRYABLE,
ExportResult.FAILED_NOT_RETRYABLE,
):
return LogExportResult.FAILURE
return None
# pylint: disable=line-too-long
# Common schema: https://github.com/microsoft/common-schema/blob/main/Mappings/AzureMonitor-AI.md#messageseveritylevel
# SeverityNumber specs: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md#field-severitynumber
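# Resulting mapping of the formula below (derived from the specs above): SeverityNumber 1-8 (TRACE/DEBUG) -> 0,
# 9-12 (INFO) -> 1, 13-16 (WARN) -> 2, 17-20 (ERROR) -> 3, 21-24 (FATAL) -> 4,
# e.g. 17 gives int((17 - 1) / 4 - 1) = 3.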
def METHOD_NAME(severity_number: SeverityNumber):
if severity_number.value < 9:
return 0
return int((severity_number.value - 1) / 4 - 1)
def _is_ignored_attribute(key: str) -> bool:
return key in _IGNORED_ATTRS
_IGNORED_ATTRS = frozenset(
(
SpanAttributes.EXCEPTION_TYPE,
SpanAttributes.EXCEPTION_MESSAGE,
SpanAttributes.EXCEPTION_STACKTRACE,
SpanAttributes.EXCEPTION_ESCAPED,
_APPLICATION_INSIGHTS_EVENT_MARKER_ATTRIBUTE,
)
) |
12 | delete empty color blocks | # Authors: see git history
#
# Copyright (c) 2010 Authors
# Licensed under the GNU GPL version 3.0 or later. See the file LICENSE for details.
from sys import exit
from typing import List
from inkex import errormsg
from ..i18n import _
from ..svg import PIXELS_PER_MM
from ..utils.geometry import Point
from ..utils.threading import check_stop_flag
from .color_block import ColorBlock
def stitch_groups_to_stitch_plan(stitch_groups, collapse_len=None, min_stitch_len=0.1, disable_ties=False): # noqa: C901
"""Convert a collection of StitchGroups to a StitchPlan.
* applies instructions embedded in the StitchGroup such as trim_after and stop_after
* adds tie-ins and tie-offs
* adds jump-stitches between stitch_group if necessary
"""
if not stitch_groups:
errormsg(_("There is no selected stitchable element. Please run "
"Extensions > Ink/Stitch > Troubleshoot > Troubleshoot objects in case you have expected a stitchout."))
exit(1)
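# collapse_len is given in millimetres; default to 3 mm and convert to pixel units below.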
if collapse_len is None:
collapse_len = 3.0
collapse_len = collapse_len * PIXELS_PER_MM
stitch_plan = StitchPlan()
color_block = stitch_plan.new_color_block(color=stitch_groups[0].color)
previous_stitch_group = None
need_tie_in = True
for stitch_group in stitch_groups:
check_stop_flag()
if not stitch_group.stitches:
continue
if color_block.color != stitch_group.color:
# add a lock stitch to the last element of the previous group
if not need_tie_in:
lock_stitches = previous_stitch_group.get_lock_stitches("end", disable_ties)
if lock_stitches:
color_block.add_stitches(stitches=lock_stitches)
need_tie_in = True
# end the previous block with a color change
color_block.add_stitch(color_change=True)
# make a new block of our color
color_block = stitch_plan.new_color_block(color=stitch_group.color)
else:
if (len(color_block) and not need_tie_in and
((stitch_group.stitches[0] - color_block.stitches[-1]).length() > collapse_len or
previous_stitch_group.force_lock_stitches)):
lock_stitches = previous_stitch_group.get_lock_stitches("end", disable_ties)
if lock_stitches:
color_block.add_stitches(stitches=lock_stitches)
need_tie_in = True
if need_tie_in is True:
lock_stitches = stitch_group.get_lock_stitches("start", disable_ties)
if lock_stitches:
color_block.add_stitch(lock_stitches[0], jump=True)
color_block.add_stitches(stitches=lock_stitches)
else:
color_block.add_stitch(stitch_group.stitches[0], jump=True)
need_tie_in = False
color_block.add_stitches(stitches=stitch_group.stitches)
if stitch_group.trim_after or stitch_group.stop_after:
lock_stitches = stitch_group.get_lock_stitches("end", disable_ties)
if lock_stitches:
color_block.add_stitches(stitches=lock_stitches)
need_tie_in = True
if stitch_group.trim_after:
color_block.add_stitch(trim=True)
if stitch_group.stop_after:
color_block.add_stitch(stop=True)
previous_stitch_group = stitch_group
if not need_tie_in:
# tie off at the end if we haven't already
lock_stitches = stitch_group.get_lock_stitches("end", disable_ties)
if lock_stitches:
color_block.add_stitches(stitches=lock_stitches)
if len(color_block) == 0:
# last block ended in a stop, so now we have an empty block
del stitch_plan.color_blocks[-1]
stitch_plan.filter_duplicate_stitches(min_stitch_len)
return stitch_plan
class StitchPlan(object):
"""Holds a set of color blocks, each containing stitches."""
def __init__(self):
self.color_blocks = []
def new_color_block(self, *args, **kwargs):
color_block = ColorBlock(*args, **kwargs)
self.color_blocks.append(color_block)
return color_block
def METHOD_NAME(self):
color_blocks = []
for color_block in self.color_blocks:
if len(color_block) > 0:
color_blocks.append(color_block)
self.color_blocks = color_blocks
def add_color_block(self, color_block):
self.color_blocks.append(color_block)
def filter_duplicate_stitches(self, min_stitch_len):
for color_block in self:
color_block.filter_duplicate_stitches(min_stitch_len)
def __iter__(self):
return iter(self.color_blocks)
def __len__(self):
return len(self.color_blocks)
def __repr__(self):
return "StitchPlan(%s)" % ", ".join(repr(cb) for cb in self.color_blocks)
def __json__(self):
return dict(color_blocks=self.color_blocks,
num_stops=self.num_stops,
num_trims=self.num_trims,
num_stitches=self.num_stitches,
bounding_box=self.bounding_box,
estimated_thread=self.estimated_thread
)
@property
def num_colors(self):
"""Number of unique colors in the stitch plan."""
return len({block.color for block in self})
@property
def num_color_blocks(self):
return len(self.color_blocks)
@property
def num_stops(self):
return sum(1 for block in self if block.stop_after)
@property
def num_trims(self):
return sum(block.num_trims for block in self)
@property
def num_stitches(self):
return sum(block.num_stitches for block in self)
@property
def bounding_box(self):
color_block_bounding_boxes = [cb.bounding_box for cb in self]
minx = min(bb[0] for bb in color_block_bounding_boxes)
miny = min(bb[1] for bb in color_block_bounding_boxes)
maxx = max(bb[2] for bb in color_block_bounding_boxes)
maxy = max(bb[3] for bb in color_block_bounding_boxes)
return minx, miny, maxx, maxy
@property
def estimated_thread(self):
thread_meter = sum(block.estimated_thread for block in self) / PIXELS_PER_MM / 1000
return round(thread_meter, 2)
@property
def dimensions(self):
minx, miny, maxx, maxy = self.bounding_box
return (maxx - minx, maxy - miny)
@property
def extents(self):
minx, miny, maxx, maxy = self.bounding_box
return max(-minx, maxx), max(-miny, maxy)
@property
def dimensions_mm(self):
dimensions = self.dimensions
return (dimensions[0] / PIXELS_PER_MM, dimensions[1] / PIXELS_PER_MM)
@property
def last_color_block(self):
if self.color_blocks:
return self.color_blocks[-1]
else:
return None
def make_offsets(self, offsets: List[Point]):
out = StitchPlan()
out.color_blocks = [block.make_offsets(offsets) for block in self]
return out |
13 | test print topic help with devel for | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015-2018 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import pydoc
from unittest import mock
import fixtures
from testtools.matchers import Contains, Equals, StartsWith
from snapcraft_legacy.cli._runner import run
from snapcraft_legacy.cli.help import _TOPICS
from tests.legacy import fixture_setup
from . import CommandBaseTestCase
class HelpCommandBaseTestCase(CommandBaseTestCase):
def setUp(self):
super().setUp()
# pydoc's pager guess can fail; for tests we want a plain pager anyway
p = mock.patch("pydoc.pager", new=pydoc.plainpager)
p.start()
self.addCleanup(p.stop)
class HelpCommandTestCase(HelpCommandBaseTestCase):
def test_topic_and_plugin_not_found_exits_with_tip(self):
fake_logger = fixtures.FakeLogger(level=logging.ERROR)
self.useFixture(fake_logger)
result = self.run_command(["help", "does-not-exist"])
self.assertThat(result.exit_code, Equals(1))
self.assertThat(
result.output, Contains("There is no help topic, plugin or command")
)
def test_topic_and_plugin_adds_ellipsis_for_long_arg(self):
fake_logger = fixtures.FakeLogger(level=logging.ERROR)
self.useFixture(fake_logger)
result = self.run_command(["help", "1234567890123"])
self.assertThat(result.exit_code, Equals(1))
self.assertThat(result.output, Contains("1234567890..."))
def test_print_module_help_for_valid_plugin_default_base(self):
result = self.run_command(["help", "nil"])
expected = "Displaying help for the 'nil' plugin for 'core20'."
output = result.output[: len(expected)]
self.assertThat(
output,
Equals(expected),
"The help message does not start with {!r} but with "
"{!r} instead".format(expected, output),
)
def test_print_module_help_for_valid_plugin_with_base(self):
result = self.run_command(["help", "nil", "--base", "core20"])
expected = "Displaying help for the 'nil' plugin for 'core20'."
output = result.output[: len(expected)]
self.expectThat(
output,
Equals(expected),
"The help message does not start with {!r} but with "
"{!r} instead".format(expected, output),
)
def test_print_module_help_for_valid_plugin_snapcraft_yaml(self):
self.useFixture(
fixture_setup.SnapcraftYaml(
self.path,
base="core18",
parts={"part1": {"source": ".", "plugin": "nil"}},
)
)
result = self.run_command(["help", "python", "--base", "core20"])
expected = (
"Displaying help for the 'python' plugin for 'core20'.\n\n"
"The python plugin can be used for"
)
output = result.output[: len(expected)]
self.assertThat(
output,
Equals(expected),
"The help message does not start with {!r} but with "
"{!r} instead".format(expected, output),
)
def test_print_module_named_with_dashes_help_for_valid_plugin(self):
result = self.run_command(["help", "catkin-tools", "--base", "core20"])
expected = "Displaying help for the 'catkin-tools' plugin for 'core20'."
self.assertThat(result.output, StartsWith(expected))
def test_show_module_help_with_devel_for_valid_plugin(self):
result = self.run_command(["help", "nil", "--devel"])
expected = (
"Help on module snapcraft_legacy.plugins.v2.nil in snapcraft_legacy.plugins"
)
output = result.output[: len(expected)]
self.assertThat(
output,
Equals(expected),
"The help message does not start with {!r} but with "
"{!r} instead".format(expected, output),
)
def test_print_topics(self):
result = self.run_command(["help", "topics"])
output = result.output.strip().split("\n")
for t in _TOPICS:
self.assertTrue(
t in output, "Missing topic: {!r} in {!r}".format(t, output)
)
def test_print_topic_help_for_valid_topic(self):
result = self.run_command(["help", "sources"])
expected = "Common 'source' options."
output = result.output[: len(expected)]
self.assertThat(
output,
Equals(expected),
"The help message does not start with {!r} but with "
"{!r} instead".format(expected, output),
)
def test_print_generic_help_by_default(self):
result = self.run_command(["help"])
self.assertThat(
result.output, Contains("Snapcraft is a delightful packaging tool.")
)
self.assertThat(result.output, Contains("For more help"))
def test_no_unicode_in_help_strings(self):
helps = ["topics"]
for key in _TOPICS.keys():
helps.append(str(key))
# Get a list of plugins
import os
from pathlib import Path
import snapcraft_legacy.plugins
for plugin in Path(snapcraft_legacy.plugins.__path__[0]).glob("*.py"):
if os.path.isfile(str(plugin)) and not os.path.basename(
str(plugin)
).startswith("_"):
helps.append(os.path.basename(str(plugin)[:-3]))
for key in helps:
result = self.run_command(["help", key])
# A UnicodeEncodeError will be raised if the help text has
# non-ASCII characters.
result.output.encode("ascii")
class TopicWithDevelTestCase(HelpCommandBaseTestCase):
def METHOD_NAME(self):
expected = {
"sources": "Help on package snapcraft",
"plugins": "Help on package snapcraft",
}
for topic in _TOPICS:
result = self.run_command(["help", topic, "--devel"])
output = result.output[: len(expected[topic])]
self.assertThat(
output,
Equals(expected[topic]),
"The help message does not start with {!r} but with "
"{!r} instead".format(expected[topic], output),
)
class TestHelpForCommand(HelpCommandBaseTestCase):
def test_help_for_command(self):
for command in run.commands:
result = self.run_command(["help", command])
self.assertThat(result.exit_code, Equals(0))
# Verify that the first line of help text is correct
# to ensure no name squatting takes place.
self.assertThat(
result.output, Contains(run.commands[command].help.split("\n")[0])
) |
14 | test type raises for unknown type of | import pytest
from h import models
from h.models.group import AUTHORITY_PROVIDED_ID_MAX_LENGTH, ReadableBy, WriteableBy
def test_init_sets_given_attributes():
group = models.Group(name="My group", authority="example.com", enforce_scope=False)
assert group.name == "My group"
assert group.authority == "example.com"
assert not group.enforce_scope
def test_with_short_name():
"""Should raise ValueError if name shorter than 3 characters."""
with pytest.raises(ValueError):
models.Group(name="ab")
def test_with_long_name():
"""Should raise ValueError if name longer than 25 characters."""
with pytest.raises(ValueError):
models.Group(name="abcdefghijklmnopqrstuvwxyz")
def test_enforce_scope_is_True_by_default(db_session, factories):
user = factories.User()
group = models.Group(name="Foobar", authority="foobar.com", creator=user)
db_session.add(group)
db_session.flush()
assert group.enforce_scope is True
def test_enforce_scope_can_be_set_False(db_session, factories):
user = factories.User()
group = models.Group(
name="Foobar", authority="foobar.com", creator=user, enforce_scope=False
)
db_session.add(group)
db_session.flush()
assert not group.enforce_scope
def test_slug(db_session, factories, organization):
name = "My Hypothesis Group"
user = factories.User()
group = models.Group(
name=name,
authority="foobar.com",
creator=user,
organization=organization,
)
db_session.add(group)
db_session.flush()
assert group.slug == "my-hypothesis-group"
def test_type_returns_open_for_open_groups(factories):
assert factories.OpenGroup().type == "open"
def test_type_returns_private_for_private_groups(factories):
assert factories.Group().type == "private"
def test_type_returns_restricted_for_restricted_groups(factories):
assert factories.RestrictedGroup().type == "restricted"
def test_it_returns_None_by_default_for_authority_provided_id():
group = models.Group(name="abcdefg")
assert group.authority_provided_id is None
def test_it_returns_None_for_groupid_if_authority_provided_id_is_None(factories):
group = factories.Group(authority_provided_id=None)
assert group.groupid is None
def test_it_returns_formatted_groupid_if_authority_provided_id(factories):
group = factories.Group()
group.authority_provided_id = "hithere"
assert group.groupid == f"group:hithere@{group.authority}"
def test_groupid_setter_raises_ValueError_if_groupid_invalid(factories):
group = factories.Group()
with pytest.raises(ValueError, match="isn't a valid groupid"):
group.groupid = "nonsense"
def test_groupid_setter_sets_constituent_fields(factories):
group = factories.Group()
group.groupid = "group:onetwo@threefour.com"
assert group.authority_provided_id == "onetwo"
assert group.authority == "threefour.com"
def test_groupid_setter_accepts_None_and_nullifies_authority_provided_id(factories):
group = factories.Group()
group.groupid = "group:onetwo@threefour.com"
group.groupid = None
assert group.groupid is None
assert group.authority == "threefour.com"
assert group.authority_provided_id is None
@pytest.mark.parametrize(
"authority_provided_id", ["%%&whatever", "^flop", "#---", "ßeta"]
)
def test_it_raises_ValueError_if_invalid_authority_provided_id(authority_provided_id):
group = models.Group(name="abcdefg")
with pytest.raises(ValueError, match="authority_provided_id must only contain"):
group.authority_provided_id = authority_provided_id
def test_it_raises_ValueError_if_authority_provided_id_too_long():
group = models.Group(name="abcdefg")
with pytest.raises(ValueError, match="characters or fewer"):
group.authority_provided_id = "a" * (AUTHORITY_PROVIDED_ID_MAX_LENGTH + 1)
def test_it_allows_authority_provided_id_to_be_None():
group = models.Group(name="abcdefg")
group.authority_provided_id = None
def METHOD_NAME(factories):
group = factories.Group()
# Set the group's access flags to an invalid / unused combination.
group.joinable_by = None
group.readable_by = ReadableBy.members
group.writeable_by = WriteableBy.authority
expected_err = "^This group doesn't seem to match any known type"
with pytest.raises(ValueError, match=expected_err):
_ = group.type
def test_you_cannot_set_type(factories):
group = factories.Group()
with pytest.raises(AttributeError, match="can't set attribute"):
group.type = "open"
def test_repr(db_session, factories, organization):
name = "My Hypothesis Group"
user = factories.User()
group = models.Group(
name=name,
authority="foobar.com",
creator=user,
organization=organization,
)
db_session.add(group)
db_session.flush()
assert repr(group) == "<Group: my-hypothesis-group>"
def test_group_organization(db_session):
name = "My Hypothesis Group"
org = models.Organization(name="My Organization", authority="foobar.com")
db_session.add(org)
db_session.flush()
group = models.Group(name=name, authority="foobar.com", organization=org)
db_session.add(group)
db_session.flush()
assert group.organization == org
assert group.organization_id == org.id
def test_created_by(db_session, factories, organization):
name_1 = "My first group"
name_2 = "My second group"
user = factories.User()
group_1 = models.Group(
name=name_1,
authority="foobar.com",
creator=user,
organization=organization,
)
group_2 = models.Group(
name=name_2,
authority="foobar.com",
creator=user,
organization=organization,
)
db_session.add_all([group_1, group_2])
db_session.flush()
assert models.Group.created_by(db_session, user).all() == [group_1, group_2]
def test_public_group():
group = models.Group(readable_by=ReadableBy.world)
assert group.is_public
def test_non_public_group():
group = models.Group(readable_by=ReadableBy.members)
assert not group.is_public
@pytest.fixture()
def organization(factories):
return factories.Organization() |
15 | prep param lists | import torch
import torch.nn as nn
from torch.autograd import Variable
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
class tofp16(nn.Module):
"""
Utility module that implements::
def forward(self, input):
return input.half()
"""
def __init__(self):
super(tofp16, self).__init__()
def forward(self, input):
return input.half()
def BN_convert_float(module):
"""
Utility function for network_to_half().
Retained for legacy purposes.
"""
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm) and module.affine is True:
module.float()
for child in module.children():
BN_convert_float(child)
return module
def network_to_half(network):
"""
Convert model to half precision in a batchnorm-safe way.
Retained for legacy purposes. It is recommended to use FP16Model.
"""
return nn.Sequential(tofp16(), BN_convert_float(network.half()))
def convert_module(module, dtype):
"""
Converts a module's immediate parameters and buffers to dtype.
"""
for param in module.parameters(recurse=False):
if param is not None:
if param.data.dtype.is_floating_point:
param.data = param.data.to(dtype=dtype)
if param._grad is not None and param._grad.data.dtype.is_floating_point:
param._grad.data = param._grad.data.to(dtype=dtype)
for buf in module.buffers(recurse=False):
if buf is not None and buf.data.dtype.is_floating_point:
buf.data = buf.data.to(dtype=dtype)
def convert_network(network, dtype):
"""
Converts a network's parameters and buffers to dtype.
"""
for module in network.modules():
if isinstance(module, torch.nn.modules.batchnorm._BatchNorm) and module.affine is True:
continue
convert_module(module, dtype)
if isinstance(module, torch.nn.RNNBase) or isinstance(module, torch.nn.modules.rnn.RNNBase):
module.flatten_parameters()
return network
class FP16Model(nn.Module):
"""
Convert model to half precision in a batchnorm-safe way.
"""
def __init__(self, network):
from apex import deprecated_warning
deprecated_warning("apex.fp16_utils is deprecated and will be removed by the end of February 2023. Use [PyTorch AMP](https://pytorch.org/docs/stable/amp.html)")
super(FP16Model, self).__init__()
self.network = convert_network(network, dtype=torch.half)
def forward(self, *inputs):
inputs = tuple(t.half() for t in inputs)
return self.network(*inputs)
def backwards_debug_hook(grad):
raise RuntimeError("master_params received a gradient in the backward pass!")
def METHOD_NAME(model, flat_master=False):
"""
Creates a list of FP32 master parameters for a given model, as in
`Training Neural Networks with Mixed Precision: Real Examples`_.
Args:
model (torch.nn.Module): Existing Pytorch model
flat_master (bool, optional, default=False): Flatten the master parameters into a single tensor, as a performance optimization.
Returns:
A tuple (``model_params``, ``master_params``). ``model_params`` is a list of the model's parameters for later use with :func:`model_grads_to_master_grads` and :func:`master_params_to_model_params`. ``master_params`` is a list of FP32 master parameters. If ``flat_master=True``, ``master_params`` will be a list with one element.
Example::
model_params, master_params = prep_param_lists(model)
.. warning::
Currently, if ``flat_master=True``, all the model's parameters must be the same type. If the model has parameters of different types, use ``flat_master=False``, or use :class:`FP16_Optimizer`.
.. _`Training Neural Networks with Mixed Precision: Real Examples`:
http://on-demand.gputechconf.com/gtc/2018/video/S81012/
"""
model_params = [param for param in model.parameters() if param.requires_grad]
if flat_master:
# Give the user some more useful error messages
try:
# flatten_dense_tensors returns a contiguous flat array.
# http://pytorch.org/docs/master/_modules/torch/_utils.html
master_params = _flatten_dense_tensors([param.data for param in model_params]).float()
except:
print("Error in prep_param_lists: model may contain a mixture of parameters "
"of different types. Use flat_master=False, or use FP16_Optimizer.")
raise
master_params = torch.nn.Parameter(master_params)
master_params.requires_grad = True
# master_params.register_hook(backwards_debug_hook)
if master_params.grad is None:
master_params.grad = master_params.new(*master_params.size())
return model_params, [master_params]
else:
master_params = [param.clone().float().detach() for param in model_params]
for param in master_params:
param.requires_grad = True
return model_params, master_params
def model_grads_to_master_grads(model_params, master_params, flat_master=False):
"""
Copy model gradients to master gradients.
Args:
model_params: List of model parameters created by :func:`prep_param_lists`.
master_params: List of FP32 master parameters created by :func:`prep_param_lists`. If ``master_params`` was created with ``flat_master=True``, ``flat_master=True`` should also be supplied to :func:`model_grads_to_master_grads`.
"""
if flat_master:
# The flattening may incur one more deep copy than is necessary.
master_params[0].grad.data.copy_(
_flatten_dense_tensors([p.grad.data for p in model_params]))
else:
for model, master in zip(model_params, master_params):
if model.grad is not None:
if master.grad is None:
master.grad = Variable(master.data.new(*master.data.size()))
master.grad.data.copy_(model.grad.data)
else:
master.grad = None
def master_params_to_model_params(model_params, master_params, flat_master=False):
"""
Copy master parameters to model parameters.
Args:
model_params: List of model parameters created by :func:`prep_param_lists`.
master_params: List of FP32 master parameters created by :func:`prep_param_lists`. If ``master_params`` was created with ``flat_master=True``, ``flat_master=True`` should also be supplied to :func:`master_params_to_model_params`.
"""
if flat_master:
for model, master in zip(model_params,
_unflatten_dense_tensors(master_params[0].data, model_params)):
model.data.copy_(master)
else:
for model, master in zip(model_params, master_params):
model.data.copy_(master.data)
# Backward compatibility fixes
def to_python_float(t):
if hasattr(t, 'item'):
return t.item()
else:
return t[0]
TORCH_MAJOR = int(torch.__version__.split('.')[0])
TORCH_MINOR = int(torch.__version__.split('.')[1])
if TORCH_MAJOR == 0 and TORCH_MINOR <= 4:
clip_grad_norm = torch.nn.utils.clip_grad_norm
else:
clip_grad_norm = torch.nn.utils.clip_grad_norm_ |
16 | test results display | import pytest
from grandchallenge.components.models import (
ComponentInterface,
ComponentInterfaceValue,
)
from grandchallenge.evaluation.models import Evaluation, Phase
from grandchallenge.evaluation.tasks import calculate_ranks
from tests.evaluation_tests.factories import EvaluationFactory, PhaseFactory
from tests.factories import UserFactory
@pytest.mark.django_db
def test_calculate_ranks(django_assert_max_num_queries):
phase = PhaseFactory()
results = [
# Warning: Do not change these values without updating the
# expected_ranks below.
{"a": 0.0, "b": 0.0},
{"a": 0.5, "b": 0.2},
{"a": 1.0, "b": 0.3},
{"a": 0.7, "b": 0.4},
{"a": 0.5, "b": 0.5},
# Following two are invalid as they are incomplete
{"a": 1.0},
{"b": 0.3},
# Add a valid, but unpublished result
{"a": 0.1, "b": 0.1},
]
queryset = [
EvaluationFactory(submission__phase=phase, status=Evaluation.SUCCESS)
for _ in range(len(results))
]
for e, r in zip(queryset, results, strict=True):
e.outputs.add(
ComponentInterfaceValue.objects.create(
interface=ComponentInterface.objects.get(
slug="metrics-json-file"
),
value=r,
)
)
# Unpublish the result
queryset[-1].published = False
queryset[-1].save()
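# expected[<score_default_sort>][<scoring_method_choice>][<extra-column order>] holds the
# ranks and rank_scores asserted for each configuration exercised in the loop below.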
expected = {
Phase.DESCENDING: {
Phase.ABSOLUTE: {
Phase.DESCENDING: {
"ranks": [5, 3, 1, 2, 3, 0, 0, 0],
"rank_scores": [5, 3, 1, 2, 3, 0, 0, 0],
},
Phase.ASCENDING: {
"ranks": [5, 3, 1, 2, 3, 0, 0, 0],
"rank_scores": [5, 3, 1, 2, 3, 0, 0, 0],
},
},
Phase.MEDIAN: {
Phase.DESCENDING: {
"ranks": [5, 4, 1, 1, 1, 0, 0, 0],
"rank_scores": [5, 3.5, 2, 2, 2, 0, 0, 0],
},
Phase.ASCENDING: {
"ranks": [3, 2, 1, 3, 5, 0, 0, 0],
"rank_scores": [3, 2.5, 2, 3, 4, 0, 0, 0],
},
},
Phase.MEAN: {
Phase.DESCENDING: {
"ranks": [5, 4, 1, 1, 1, 0, 0, 0],
"rank_scores": [5, 3.5, 2, 2, 2, 0, 0, 0],
},
Phase.ASCENDING: {
"ranks": [3, 2, 1, 3, 5, 0, 0, 0],
"rank_scores": [3, 2.5, 2, 3, 4, 0, 0, 0],
},
},
},
Phase.ASCENDING: {
Phase.ABSOLUTE: {
Phase.DESCENDING: {
"ranks": [1, 2, 5, 4, 2, 0, 0, 0],
"rank_scores": [1, 2, 5, 4, 2, 0, 0, 0],
},
Phase.ASCENDING: {
"ranks": [1, 2, 5, 4, 2, 0, 0, 0],
"rank_scores": [1, 2, 5, 4, 2, 0, 0, 0],
},
},
Phase.MEDIAN: {
Phase.DESCENDING: {
"ranks": [2, 2, 5, 2, 1, 0, 0, 0],
"rank_scores": [3, 3, 4, 3, 1.5, 0, 0, 0],
},
Phase.ASCENDING: {
"ranks": [1, 2, 4, 4, 3, 0, 0, 0],
"rank_scores": [1, 2, 4, 4, 3.5, 0, 0, 0],
},
},
Phase.MEAN: {
Phase.DESCENDING: {
"ranks": [2, 2, 5, 2, 1, 0, 0, 0],
"rank_scores": [3, 3, 4, 3, 1.5, 0, 0, 0],
},
Phase.ASCENDING: {
"ranks": [1, 2, 4, 4, 3, 0, 0, 0],
"rank_scores": [1, 2, 4, 4, 3.5, 0, 0, 0],
},
},
},
}
for score_method in (Phase.ABSOLUTE, Phase.MEDIAN, Phase.MEAN):
for a_order in (Phase.DESCENDING, Phase.ASCENDING):
for b_order in (Phase.DESCENDING, Phase.ASCENDING):
phase.score_jsonpath = "a"
phase.scoring_method_choice = score_method
phase.score_default_sort = a_order
phase.extra_results_columns = [
{"path": "b", "title": "b", "order": b_order}
]
phase.save()
with django_assert_max_num_queries(9):
calculate_ranks(phase_pk=phase.pk)
assert_ranks(
queryset,
expected[a_order][score_method][b_order]["ranks"],
expected[a_order][score_method][b_order]["rank_scores"],
)
@pytest.mark.django_db
def METHOD_NAME():
phase = PhaseFactory()
user1 = UserFactory()
user2 = UserFactory()
metrics = "metrics"
creator = "creator"
results = [
{metrics: {"b": 0.3}, creator: user1}, # Invalid result
{metrics: {"a": 0.6}, creator: user1},
{metrics: {"a": 0.4}, creator: user1},
{metrics: {"a": 0.2}, creator: user1},
{metrics: {"a": 0.1}, creator: user2},
{metrics: {"a": 0.5}, creator: user2},
{metrics: {"a": 0.3}, creator: user2},
]
queryset = [
EvaluationFactory(
submission__phase=phase,
submission__creator=r[creator],
status=Evaluation.SUCCESS,
)
for r in results
]
for e, r in zip(queryset, results, strict=True):
e.outputs.add(
ComponentInterfaceValue.objects.create(
interface=ComponentInterface.objects.get(
slug="metrics-json-file"
),
value=r[metrics],
)
)
phase.score_jsonpath = "a"
phase.result_display_choice = Phase.ALL
phase.save()
calculate_ranks(phase_pk=phase.pk)
expected_ranks = [0, 1, 3, 5, 6, 2, 4]
assert_ranks(queryset, expected_ranks)
phase.result_display_choice = Phase.MOST_RECENT
phase.save()
calculate_ranks(phase_pk=phase.pk)
expected_ranks = [0, 0, 0, 2, 0, 0, 1]
assert_ranks(queryset, expected_ranks)
phase.result_display_choice = Phase.BEST
phase.save()
calculate_ranks(phase_pk=phase.pk)
expected_ranks = [0, 1, 0, 0, 0, 2, 0]
assert_ranks(queryset, expected_ranks)
# now test reverse order
phase.score_default_sort = phase.ASCENDING
phase.save()
calculate_ranks(phase_pk=phase.pk)
expected_ranks = [0, 0, 0, 2, 1, 0, 0]
assert_ranks(queryset, expected_ranks)
phase.result_display_choice = Phase.MOST_RECENT
phase.save()
calculate_ranks(phase_pk=phase.pk)
expected_ranks = [0, 0, 0, 1, 0, 0, 2]
assert_ranks(queryset, expected_ranks)
@pytest.mark.django_db
def test_null_results():
phase = PhaseFactory()
results = [{"a": 0.6}, {"a": None}]
queryset = [
EvaluationFactory(submission__phase=phase, status=Evaluation.SUCCESS)
for _ in range(len(results))
]
for e, r in zip(queryset, results, strict=True):
e.outputs.add(
ComponentInterfaceValue.objects.create(
interface=ComponentInterface.objects.get(
slug="metrics-json-file"
),
value=r,
)
)
phase.score_jsonpath = "a"
phase.result_display_choice = Phase.ALL
phase.save()
calculate_ranks(phase_pk=phase.pk)
expected_ranks = [1, 0]
assert_ranks(queryset, expected_ranks)
def assert_ranks(queryset, expected_ranks, expected_rank_scores=None):
for r in queryset:
r.refresh_from_db()
assert [r.rank for r in queryset] == expected_ranks
if expected_rank_scores:
assert [r.rank_score for r in queryset] == expected_rank_scores |
17 | nptensor2np | from typing import Tuple, Type
import numpy as np
from ..image_utils import MAX_VALUES_BY_DTYPE, as_3d
def np_denorm(x: np.ndarray, min_max: Tuple[float, float] = (-1.0, 1.0)) -> np.ndarray:
"""Denormalize from the ``min_max`` range (default [-1, 1]) back to [0, 1].
For the default range this reduces to "out = (x + 1.0) / 2.0",
e.g. for use with a proper activation (such as tanh) on a generator's output.
"""
out = (x - min_max[0]) / (min_max[1] - min_max[0])
return np.clip(out, 0, 1)
def np_norm(x: np.ndarray) -> np.ndarray:
"""Normalize (z-norm) from [0,1] range to [-1,1]"""
out = (x - 0.5) * 2.0
return np.clip(out, -1, 1)
def np_bgr_to_rgb(img: np.ndarray) -> np.ndarray:
out: np.ndarray = img[::-1, ...]
return out
def np_rgb_to_bgr(img: np.ndarray) -> np.ndarray:
# same operation as bgr_to_rgb(), flip image channels
return np_bgr_to_rgb(img)
def np_bgra_to_rgba(img: np.ndarray) -> np.ndarray:
out: np.ndarray = img[[2, 1, 0, 3], ...] # type: ignore
return out
def np_rgba_to_bgra(img: np.ndarray) -> np.ndarray:
# same operation as bgra_to_rgba(), flip image channels
return np_bgra_to_rgba(img)
def np2nptensor(
img: np.ndarray,
bgr2rgb=True,
data_range=1.0, # pylint: disable=unused-argument
normalize=False,
change_range=True,
add_batch=True,
) -> np.ndarray:
"""Converts a numpy image array into a numpy Tensor array.
Parameters:
img (numpy array): the input image numpy array
add_batch (bool): choose if new tensor needs batch dimension added
"""
# check how many channels the image has, then condition. ie. RGB, RGBA, Gray
# if bgr2rgb:
# img = img[
# :, :, [2, 1, 0]
# ] # BGR to RGB -> in numpy, if using OpenCV, else not needed. Only if image has colors.
if change_range:
dtype = img.dtype
maxval = MAX_VALUES_BY_DTYPE.get(dtype.name, 1.0)
t_dtype = np.dtype("float32")
img = img.astype(t_dtype) / maxval # ie: uint8 = /255
# "HWC to CHW" and "numpy to tensor"
img = np.ascontiguousarray(np.transpose(as_3d(img), (2, 0, 1))).astype(np.float32)
if bgr2rgb:
# BGR to RGB -> in tensor, if using OpenCV, else not needed. Only if image has colors.)
if (
img.shape[0] % 3 == 0
): # RGB or MultixRGB (3xRGB, 5xRGB, etc. For video tensors.)
img = np_bgr_to_rgb(img)
elif img.shape[0] == 4: # RGBA
img = np_bgra_to_rgba(img)
if add_batch:
img = np.expand_dims(
img, axis=0
) # Add fake batch dimension = 1 . squeeze() will remove the dimensions of size 1
if normalize:
img = np_norm(img)
return img
def METHOD_NAME(
img: np.ndarray,
rgb2bgr=True,
remove_batch=True,
data_range=255,
denormalize=False,
change_range=True,
imtype: Type = np.uint8,
) -> np.ndarray:
"""Converts a Tensor array into a numpy image array.
Parameters:
img (tensor): the input image tensor array
4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order
remove_batch (bool): choose if tensor of shape BCHW needs to be squeezed
denormalize (bool): Used to denormalize from [-1,1] range back to [0,1]
imtype (type): the desired type of the converted numpy array (np.uint8
default)
Output:
img (np array): 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default)
"""
n_dim = img.ndim
img = img.astype(np.float32)
if n_dim in (4, 3):
# if n_dim == 4, has to convert to 3 dimensions
if n_dim == 4 and remove_batch:
# remove a fake batch dimension
img = img.squeeze(0)
if img.shape[0] == 3 and rgb2bgr: # RGB
# RGB to BGR -> in tensor, if using OpenCV, else not needed. Only if image has colors.
img_np = np_rgb_to_bgr(img)
elif img.shape[0] == 4 and rgb2bgr: # RGBA
# RGBA to BGRA -> in tensor, if using OpenCV, else not needed. Only if image has colors.
img_np = np_rgba_to_bgra(img)
else:
img_np = img
img_np = np.transpose(img_np, (1, 2, 0)) # CHW to HWC
elif n_dim == 2:
img_np = img
else:
raise TypeError(
f"Only support 4D, 3D and 2D tensor. But received with dimension: {n_dim:d}"
)
# if rgb2bgr:
# img_np = img_np[[2, 1, 0], :, :] #RGB to BGR -> in numpy, if using OpenCV, else not needed. Only if image has colors.
# TODO: Check: could denormalize in the beginning in tensor form instead
if denormalize:
img_np = np_denorm(img_np) # denormalize if needed
if change_range:
img_np = np.clip(
data_range * img_np, 0, data_range # type: ignore
).round() # np.clip to the data_range
# has to be in range (0,255) before changing to np.uint8, else np.float32
return img_np.astype(imtype) |
18 | simple policy | """Test the flask oso plugin."""
from pathlib import Path
import pytest
from flask import Flask
from oso import Oso, OsoError
from werkzeug.exceptions import Forbidden
from flask_oso import FlaskOso, authorize, skip_authorization
@pytest.fixture
def flask_app():
return Flask("test")
@pytest.fixture
def oso():
return Oso()
@pytest.fixture
def user():
return "user"
@pytest.fixture
def flask_oso(flask_app, oso, user):
fo = FlaskOso(oso=oso, app=flask_app)
fo.set_get_actor(lambda: user)
return fo
@pytest.fixture
def METHOD_NAME(oso):
"""Load a simple base policy into oso."""
oso.load_file(Path(__file__).parent / "simple.polar")
@pytest.fixture
def app_ctx(flask_app):
with flask_app.app_context():
yield
def test_initialization_with_set(flask_app, oso, METHOD_NAME, app_ctx, user):
"""Test that setting oso works correctly."""
# Establish that an improperly initialized flask oso throws an exception.
flask_oso = FlaskOso()
flask_oso.set_get_actor(lambda: user)
with pytest.raises(OsoError):
flask_oso.authorize(action="read", resource="resource")
# Works after set oso.
flask_oso.set_oso(oso)
flask_oso.authorize(action="read", resource="resource")
def test_initialization_with_init(flask_app, oso, METHOD_NAME, app_ctx, user):
# Works with oso init.
flask_oso = FlaskOso(oso=oso)
flask_oso.set_get_actor(lambda: user)
flask_oso.authorize(action="read", resource="resource")
def test_authorize(flask_app, flask_oso, METHOD_NAME, app_ctx):
"""Test that authorize function works correctly."""
# Actor defaults to current actor.
flask_oso.authorize("resource", action="read")
# Overridden actor.
with pytest.raises(Forbidden):
flask_oso.authorize("resource", actor="other", action="read")
flask_oso.authorize("other_resource", actor="other_user", action="read")
# Request method action default
with flask_app.test_request_context(method="GET"):
flask_oso.authorize("action_resource")
with flask_app.test_request_context(method="POST"):
with pytest.raises(Forbidden):
flask_oso.authorize("action_resource")
flask_oso.set_get_actor(lambda: "other_user")
flask_oso.authorize("other_resource", action="read")
def test_require_authorization(flask_app, flask_oso, app_ctx, METHOD_NAME):
flask_oso.require_authorization(flask_app)
flask_app.testing = True
@flask_app.route("/")
def hello():
return "Hello"
# Don't call authorize.
with pytest.raises(OsoError):
with flask_app.test_client() as c:
c.get("/")
@flask_app.route("/auth")
def auth():
flask_oso.authorize("resource", action="read")
return "Hello"
with flask_app.test_client() as c:
resp = c.get("/auth")
assert resp.status_code == 200
# Decorator works
@flask_app.route("/decorator")
@authorize(action="read", resource="resource")
def decorated():
return "Hello"
with flask_app.test_client() as c:
resp = c.get("/decorator")
assert resp.status_code == 200
# Skip auth silences error
@flask_app.route("/open")
@skip_authorization
def open():
return "open"
with flask_app.test_client() as c:
resp = c.get("/open")
assert resp.status_code == 200
# 404 doesn't require authorization
with flask_app.test_client() as c:
resp = c.get("/nonexistent")
assert resp.status_code == 404
# Server error does
@flask_app.route("/500")
def server_error():
raise Exception("You messed this one up")
flask_app.testing = False
# Ensure that requiring authorization doesn't interfere with surfacing
# other exceptions that occur during the request.
with flask_app.test_client() as c:
resp = c.get("/500")
assert resp.status_code == 500
def test_route_authorization(flask_oso, oso, flask_app, app_ctx):
"""Test that route authorization middleware works."""
flask_oso.perform_route_authorization(app=flask_app)
flask_app.testing = True
@flask_app.route("/test_route", methods=("GET",))
def test():
return "Test"
with flask_app.test_client() as c:
with pytest.raises(OsoError) as e:
c.get("/test_route")
assert "Query for undefined rule `allow`" in str(e)
# Add rule to policy.
oso.load_str('allow("user", "GET", _: Request{path: "/test_route"});')
flask_oso.set_get_actor(lambda: "other_user")
with flask_app.test_client() as c:
assert c.get("/test_route").status_code == 403
flask_oso.set_get_actor(lambda: "user")
with flask_app.test_client() as c:
assert c.get("/test_route").status_code == 200
# Confirm that route authorization doesn't mess with errors.
with flask_app.test_client() as c:
assert c.get("/not_a_route").status_code == 404
with flask_app.test_client() as c:
assert c.post("/test_route").status_code == 405
def test_route_authorization_manual(flask_oso, oso, flask_app, app_ctx):
"""Perform route auth manually."""
flask_app.testing = True
from flask import request
@flask_app.route("/test_route")
@authorize(resource=request)
def auth():
return "authed"
with flask_app.test_client() as c:
with pytest.raises(OsoError) as e:
c.get("/test_route")
assert "Query for undefined rule `allow`" in str(e)
# Add rule
oso.load_str('allow("user", "GET", _: Request{path: "/test_route"});')
flask_oso.set_get_actor(lambda: "other_user")
with flask_app.test_client() as c:
assert c.get("/test_route").status_code == 403
flask_oso.set_get_actor(lambda: "user")
with flask_app.test_client() as c:
assert c.get("/test_route").status_code == 200
def test_custom_unauthorize(flask_oso, oso, flask_app, app_ctx):
"""Test that a custom unauthorize handler can be provided."""
auth_failed = False
def unauth():
nonlocal auth_failed
auth_failed = True
flask_oso.set_unauthorized_action(unauth)
# Add rule
oso.load_str('allow(_, "not bad", _);')
flask_oso.authorize(resource="fail!", action="bad")
assert auth_failed
def test_no_oso_error(flask_app, oso):
"""Test that using authorize without init app throws an error."""
with pytest.raises(OsoError, match="Application context"):
@authorize(resource="test")
def orm_function():
return "model"
orm_function()
with flask_app.app_context():
with pytest.raises(OsoError, match="init_app"):
@flask_app.route("/")
@authorize(resource="test")
def route():
return "test"
flask_app.testing = True
with flask_app.test_client() as c:
c.get("/").status_code |
19 | slice | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# The following code has been taken from
# https://github.com/facebookresearch/DPR, which is CC-BY-NC 4.0
# licensed as of now. More details on the license can be found
# at https://github.com/facebookresearch/DPR/blob/master/LICENSE
"""
Most of the tokenizer code here is copied from the DrQA codebase to avoid adding an extra dependency
"""
import copy
import logging
import regex
import spacy
logger = logging.getLogger(__name__)
class Tokens(object):
"""A class to represent a list of tokenized text."""
TEXT = 0
TEXT_WS = 1
SPAN = 2
POS = 3
LEMMA = 4
NER = 5
def __init__(self, data, annotators, opts=None):
self.data = data
self.annotators = annotators
self.opts = opts or {}
def __len__(self):
"""The number of tokens."""
return len(self.data)
def METHOD_NAME(self, i=None, j=None):
"""Return a view of the list of tokens from [i, j)."""
new_tokens = copy.copy(self)
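# The shallow copy above shares annotators/opts with the original; only the token list is narrowed.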
new_tokens.data = self.data[i: j]
return new_tokens
def untokenize(self):
"""Returns the original text (with whitespace reinserted)."""
return ''.join([t[self.TEXT_WS] for t in self.data]).strip()
def words(self, uncased=False):
"""Returns a list of the text of each token
Args:
uncased: lower cases text
"""
if uncased:
return [t[self.TEXT].lower() for t in self.data]
else:
return [t[self.TEXT] for t in self.data]
def offsets(self):
"""Returns a list of [start, end) character offsets of each token."""
return [t[self.SPAN] for t in self.data]
def pos(self):
"""Returns a list of part-of-speech tags of each token.
Returns None if this annotation was not included.
"""
if 'pos' not in self.annotators:
return None
return [t[self.POS] for t in self.data]
def lemmas(self):
"""Returns a list of the lemmatized text of each token.
Returns None if this annotation was not included.
"""
if 'lemma' not in self.annotators:
return None
return [t[self.LEMMA] for t in self.data]
def entities(self):
"""Returns a list of named-entity-recognition tags of each token.
Returns None if this annotation was not included.
"""
if 'ner' not in self.annotators:
return None
return [t[self.NER] for t in self.data]
def ngrams(self, n=1, uncased=False, filter_fn=None, as_strings=True):
"""Returns a list of all ngrams from length 1 to n.
Args:
n: upper limit of ngram length
uncased: lower cases text
filter_fn: user function that takes in an ngram list and returns
True or False to keep or not keep the ngram
as_strings: return the ngram as a string vs list
"""
def _skip(gram):
if not filter_fn:
return False
return filter_fn(gram)
words = self.words(uncased)
ngrams = [(s, e + 1)
for s in range(len(words))
for e in range(s, min(s + n, len(words)))
if not _skip(words[s:e + 1])]
# Concatenate into strings
if as_strings:
ngrams = ['{}'.format(' '.join(words[s:e])) for (s, e) in ngrams]
return ngrams
def entity_groups(self):
"""Group consecutive entity tokens with the same NER tag."""
entities = self.entities()
if not entities:
return None
non_ent = self.opts.get('non_ent', 'O')
groups = []
idx = 0
while idx < len(entities):
ner_tag = entities[idx]
# Check for entity tag
if ner_tag != non_ent:
# Chomp the sequence
start = idx
while (idx < len(entities) and entities[idx] == ner_tag):
idx += 1
groups.append((self.METHOD_NAME(start, idx).untokenize(), ner_tag))
else:
idx += 1
return groups
class Tokenizer(object):
"""Base tokenizer class.
Tokenizers implement tokenize, which should return a Tokens class.
"""
def tokenize(self, text):
raise NotImplementedError
def shutdown(self):
pass
def __del__(self):
self.shutdown()
class SimpleTokenizer(Tokenizer):
ALPHA_NUM = r'[\p{L}\p{N}\p{M}]+'
NON_WS = r'[^\p{Z}\p{C}]'
def __init__(self, **kwargs):
"""
Args:
annotators: None or empty set (only tokenizes).
"""
self._regexp = regex.compile(
'(%s)|(%s)' % (self.ALPHA_NUM, self.NON_WS),
flags=regex.IGNORECASE + regex.UNICODE + regex.MULTILINE
)
if len(kwargs.get('annotators', {})) > 0:
logger.warning('%s only tokenizes! Skipping annotators: %s' %
(type(self).__name__, kwargs.get('annotators')))
self.annotators = set()
def tokenize(self, text):
data = []
matches = [m for m in self._regexp.finditer(text)]
for i in range(len(matches)):
# Get text
token = matches[i].group()
# Get whitespace
span = matches[i].span()
start_ws = span[0]
if i + 1 < len(matches):
end_ws = matches[i + 1].span()[0]
else:
end_ws = span[1]
# Format data
data.append((
token,
text[start_ws: end_ws],
span,
))
return Tokens(data, self.annotators)
class SpacyTokenizer(Tokenizer):
def __init__(self, **kwargs):
"""
Args:
annotators: set that can include pos, lemma, and ner.
model: spaCy model to use (either path, or keyword like 'en').
"""
model = kwargs.get('model', 'en')
self.annotators = copy.deepcopy(kwargs.get('annotators', set()))
nlp_kwargs = {'parser': False}
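# Only enable the tagger / entity pipes when the requested annotators actually need them.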
if not any([p in self.annotators for p in ['lemma', 'pos', 'ner']]):
nlp_kwargs['tagger'] = False
if 'ner' not in self.annotators:
nlp_kwargs['entity'] = False
self.nlp = spacy.load(model, **nlp_kwargs)
def tokenize(self, text):
# We don't treat new lines as tokens.
clean_text = text.replace('\n', ' ')
tokens = self.nlp.tokenizer(clean_text)
if any([p in self.annotators for p in ['lemma', 'pos', 'ner']]):
self.nlp.tagger(tokens)
if 'ner' in self.annotators:
self.nlp.entity(tokens)
data = []
for i in range(len(tokens)):
# Get whitespace
start_ws = tokens[i].idx
if i + 1 < len(tokens):
end_ws = tokens[i + 1].idx
else:
end_ws = tokens[i].idx + len(tokens[i].text)
data.append((
tokens[i].text,
text[start_ws: end_ws],
(tokens[i].idx, tokens[i].idx + len(tokens[i].text)),
tokens[i].tag_,
tokens[i].lemma_,
tokens[i].ent_type_,
))
# Set special option for non-entity tag: '' vs 'O' in spaCy
return Tokens(data, self.annotators, opts={'non_ent': ''}) |
20 | build | from conan import ConanFile
from conan.errors import ConanInvalidConfiguration
from conan.tools.files import apply_conandata_patches, chdir, collect_libs, copy, export_conandata_patches, get, replace_in_file, rm, rmdir
from conan.tools.microsoft import is_msvc
from conan.tools.scm import Version
from conans import AutoToolsBuildEnvironment, MSBuild, tools
import os
required_conan_version = ">=1.53.0"
class LibStudXmlConan(ConanFile):
name = "libstudxml"
description = "A streaming XML pull parser and streaming XML serializer implementation for modern, standard C++."
topics = ("xml", "xml-parser", "serialization")
url = "https://github.com/conan-io/conan-center-index"
homepage = "https://www.codesynthesis.com/projects/libstudxml/"
license = "MIT"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
}
default_options = {
"shared": False,
"fPIC": True,
}
_autotools = None
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _settings_build(self):
return getattr(self, "settings_build", self.settings)
def export_sources(self):
export_conandata_patches(self)
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
def configure(self):
if self.options.shared:
self.options.rm_safe("fPIC")
def layout(self):
pass
def requirements(self):
self.requires("expat/2.5.0", transitive_headers=True, transitive_libs=True)
def validate(self):
if self.info.settings.compiler == "Visual Studio" and Version(self.info.settings.compiler.version) < "9":
raise ConanInvalidConfiguration(f"Visual Studio {self.info.settings.compiler.version} is not supported.")
def build_requirements(self):
if not is_msvc(self):
self.tool_requires("gnu-config/cci.20210814")
self.tool_requires("libtool/2.4.7")
if self._settings_build.os == "Windows" and not tools.get_env("CONAN_BASH_PATH"):
self.tool_requires("msys2/cci.latest")
def source(self):
get(self, **self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
def _configure_autotools(self):
if not self._autotools:
args = ["--with-external-expat"]
if self.options.shared:
args.extend(["--enable-shared", "--disable-static"])
else:
args.extend(["--disable-shared", "--enable-static"])
self._autotools = AutoToolsBuildEnvironment(self, win_bash=tools.os_info.is_windows)
self._autotools.configure(configure_dir=self._source_subfolder, args=args)
return self._autotools
@property
def _vc_ver(self):
if self.settings.compiler == "Visual Studio":
return str(Version(self.settings.compiler.version).major)
elif self.settings.compiler == "msvc":
return {
"170": "11",
"180": "12",
"190": "14",
"191": "15",
"192": "16",
"193": "17",
}[str(self.settings.compiler.version)]
return None
def _build_vs(self):
vc_ver = int(self._vc_ver)
sln_path = None
def get_sln_path():
return os.path.join(self.source_folder, self._source_subfolder, f"libstudxml-vc{vc_ver}.sln")
sln_path = get_sln_path()
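# If no solution file ships for this exact Visual Studio version, fall back to the newest older one available.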
while not os.path.exists(sln_path):
vc_ver -= 1
sln_path = get_sln_path()
proj_path = os.path.join(self.source_folder, self._source_subfolder, "xml", f"libstudxml-vc{vc_ver}.vcxproj")
if not self.options.shared:
replace_in_file(self, proj_path, "DynamicLibrary", "StaticLibrary")
replace_in_file(self, proj_path, "LIBSTUDXML_DYNAMIC_LIB", "LIBSTUDXML_STATIC_LIB")
msbuild = MSBuild(self)
msbuild.METHOD_NAME(sln_path, platforms={"x86": "Win32"})
def _build_autotools(self):
for gnu_config in [
self.conf.get("user.gnu-config:config_guess", check_type=str),
self.conf.get("user.gnu-config:config_sub", check_type=str),
]:
if gnu_config:
copy(
self,
os.path.basename(gnu_config),
src=os.path.dirname(gnu_config),
dst=os.path.join(self.source_folder, self._source_subfolder, "config"),
)
if self.settings.compiler.get_safe("libcxx") == "libc++":
# libc++ includes a file called 'version', and since libstudxml adds source_subfolder as an
# include dir, libc++ ends up including their 'version' file instead, causing a compile error
rm(self, "version", os.path.join(self.source_folder, self._source_subfolder))
with chdir(self, os.path.join(self.source_folder, self._source_subfolder)):
self.run("{} -fiv".format(tools.get_env("AUTORECONF")), win_bash=tools.os_info.is_windows)
autotools = self._configure_autotools()
autotools.make()
def METHOD_NAME(self):
apply_conandata_patches(self)
if is_msvc(self):
self._build_vs()
else:
self._build_autotools()
def package(self):
copy(self, "LICENSE", src=os.path.join(self.source_folder, self._source_subfolder), dst=os.path.join(self.package_folder, "licenses"))
if is_msvc(self):
self.copy("xml/value-traits", dst="include", src=self._source_subfolder)
self.copy("xml/serializer", dst="include", src=self._source_subfolder)
self.copy("xml/qname", dst="include", src=self._source_subfolder)
self.copy("xml/parser", dst="include", src=self._source_subfolder)
self.copy("xml/forward", dst="include", src=self._source_subfolder)
self.copy("xml/exception", dst="include", src=self._source_subfolder)
self.copy("xml/content", dst="include", src=self._source_subfolder)
self.copy("xml/*.ixx", dst="include", src=self._source_subfolder)
self.copy("xml/*.txx", dst="include", src=self._source_subfolder)
self.copy("xml/*.hxx", dst="include", src=self._source_subfolder)
self.copy("xml/*.h", dst="include", src=self._source_subfolder)
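# The upstream VS projects emit binaries into "lib"/"bin" (x86) or "lib64"/"bin64" (x86_64).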
suffix = ""
if self.settings.arch == "x86_64":
suffix = "64"
if self.options.shared:
self.copy("*.lib", dst="lib", src=os.path.join(self._source_subfolder, "lib" + suffix))
self.copy("*.dll", dst="bin", src=os.path.join(self._source_subfolder, "bin" + suffix))
else:
self.copy("*.lib", dst="lib", src=os.path.join(self._source_subfolder, "bin" + suffix))
else:
autotools = self._configure_autotools()
autotools.install()
rm(self, "*.la", os.path.join(self.package_folder, "lib"))
rmdir(self, os.path.join(self.package_folder, "lib", "pkgconfig"))
rmdir(self, os.path.join(self.package_folder, "share"))
def package_info(self):
self.cpp_info.set_property("pkg_config_name", "libstudxml")
self.cpp_info.libs = collect_libs(self)
# If built with makefile, static library mechanism is provided by their buildsystem already
if is_msvc(self) and not self.options.shared:
self.cpp_info.defines = ["LIBSTUDXML_STATIC_LIB=1"] |
21 | main | # Developed for module tiericide, this script will quickly print out a market
# conversion map based on patch notes, as well as database conversion mapping.
import argparse
import os.path
import sqlite3
import sys
# Add eos root path to sys.path so we can import ourselves
path = os.path.dirname(__file__)
sys.path.append(os.path.realpath(os.path.join(path, "..")))
# change to correct conversion
rename_phrase = " renamed to "
conversion_phrase = " -> "
text = """
Veldspar Mining Crystal I -> Simple Asteroid Mining Crystal Type A I
Scordite Mining Crystal I -> Simple Asteroid Mining Crystal Type A I
Pyroxeres Mining Crystal I -> Simple Asteroid Mining Crystal Type A I
Plagioclase Mining Crystal I -> Simple Asteroid Mining Crystal Type A I
Veldspar Mining Crystal II -> Simple Asteroid Mining Crystal Type A II
Scordite Mining Crystal II -> Simple Asteroid Mining Crystal Type A II
Pyroxeres Mining Crystal II -> Simple Asteroid Mining Crystal Type A II
Plagioclase Mining Crystal II -> Simple Asteroid Mining Crystal Type A II
Omber Mining Crystal I -> Coherent Asteroid Mining Crystal Type A I
Kernite Mining Crystal I -> Coherent Asteroid Mining Crystal Type A I
Jaspet Mining Crystal I -> Coherent Asteroid Mining Crystal Type A I
Hemorphite Mining Crystal I -> Coherent Asteroid Mining Crystal Type A I
Hedbergite Mining Crystal I -> Coherent Asteroid Mining Crystal Type A I
Omber Mining Crystal II -> Coherent Asteroid Mining Crystal Type A II
Jaspet Mining Crystal II -> Coherent Asteroid Mining Crystal Type A II
Kernite Mining Crystal II -> Coherent Asteroid Mining Crystal Type A II
Hedbergite Mining Crystal II -> Coherent Asteroid Mining Crystal Type A II
Hemorphite Mining Crystal II -> Coherent Asteroid Mining Crystal Type A II
Gneiss Mining Crystal I -> Variegated Asteroid Mining Crystal Type A I
Dark Ochre Mining Crystal I -> Variegated Asteroid Mining Crystal Type A I
Crokite Mining Crystal I -> Variegated Asteroid Mining Crystal Type A I
Gneiss Mining Crystal II -> Variegated Asteroid Mining Crystal Type A II
Dark Ochre Mining Crystal II -> Variegated Asteroid Mining Crystal Type A II
Crokite Mining Crystal II -> Variegated Asteroid Mining Crystal Type A II
Bistot Mining Crystal I -> Complex Asteroid Mining Crystal Type A I
Arkonor Mining Crystal I -> Complex Asteroid Mining Crystal Type A I
Spodumain Mining Crystal I -> Complex Asteroid Mining Crystal Type A I
Bistot Mining Crystal II -> Complex Asteroid Mining Crystal Type A II
Arkonor Mining Crystal II -> Complex Asteroid Mining Crystal Type A II
Spodumain Mining Crystal II -> Complex Asteroid Mining Crystal Type A II
"""
def METHOD_NAME(old, new):
# Open both databases and get their cursors
old_db = sqlite3.connect(os.path.expanduser(old))
old_cursor = old_db.cursor()
new_db = sqlite3.connect(os.path.expanduser(new))
new_cursor = new_db.cursor()
renames = {}
conversions = {}
for x in text.splitlines():
x = x.strip()
if not x:
continue
if conversion_phrase in x:
c = x.split(conversion_phrase)
container = conversions
elif rename_phrase in x:
c = x.split(rename_phrase)
container = renames
else:
print("Unknown format: {}".format(x))
sys.exit()
old_name, new_name = c[0], c[1]
old_item, new_item = None, None
if "Blueprint" in old_name or "Blueprint" in new_name:
print("Blueprint: Skipping this line: %s"%x)
continue
# gather item info
new_cursor.execute('SELECT "typeID" FROM "invtypes" WHERE "typeName" = ?', (new_name,))
for row in new_cursor:
new_item = row[0]
break
old_cursor.execute('SELECT "typeID" FROM "invtypes" WHERE "typeName" = ?', (old_name,))
for row in old_cursor:
old_item = row[0]
break
if not old_item:
print("Error finding old item in {} -> {}".format(old_name, new_name))
if not new_item:
print("Error finding new item in {} -> {}".format(old_name, new_name))
if not container.get((new_item, new_name), None):
container[(new_item, new_name)] = []
container[(new_item, new_name)].append((old_item, old_name))
print(" # Renamed items")
for new, old in renames.items():
if len(old) != 1:
print("Incorrect length, key: {}, value: {}".format(new, old))
sys.exit()
old = old[0]
print(" \"{}\": \"{}\",".format(old[1], new[1]))
# Convert modules
print("\n # Converted items")
for new, olds in conversions.items():
for old in olds:
print(" \"{}\": \"{}\",".format(old[1], new[1]))
print()
print()
for new, old in conversions.items():
print(" {}: ( # {}".format(new[0], new[1]))
for item in old:
print(" {}, # {}".format(item[0], item[1]))
print(" ),")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--old", type=str)
parser.add_argument("-n", "--new", type=str)
args = parser.parse_args()
METHOD_NAME(args.old, args.new) |
22 | test set api key | import json
from pathlib import Path
from unittest.mock import MagicMock, Mock, patch
from urllib.parse import unquote_plus
import pytest
from tribler.gui.utilities import TranslatedString, compose_magnetlink, create_api_key, dict_item_is_any_of, \
duration_to_string, format_api_key, get_i18n_file_path, get_languages_file_content, I18N_DIR, LANGUAGES_FILE, \
quote_plus_unicode, set_api_key, unicode_quoter
def test_quoter_char():
"""
Test if an ASCII character is quoted correctly
"""
char = 'A'
encoded = unicode_quoter(char)
assert char == unquote_plus(encoded)
def test_quoter_unichar():
"""
Test if a unicode character is quoted correctly
"""
char = '\u9b54'
encoded = unicode_quoter(char)
assert char == unquote_plus(encoded)
def test_quoter_reserved():
"""
Test if a URI reserved character is quoted correctly
"""
char = '+'
encoded = unicode_quoter(char)
assert char != encoded
assert char == unquote_plus(encoded)
def test_quote_plus_unicode_char():
"""
Test if ASCII characters are quoted correctly
"""
s = 'Ab09'
encoded = quote_plus_unicode(s)
assert s == unquote_plus(encoded)
def test_quote_plus_unicode_unichar():
"""
Test if unicode characters are quoted correctly
"""
s = '\u9b54\u11b3\uaf92\u1111'
encoded = quote_plus_unicode(s)
assert s == unquote_plus(encoded)
def test_quote_plus_unicode_reserved():
"""
Test if URI reserved characters are quoted correctly
"""
s = '+ &'
encoded = quote_plus_unicode(s)
assert s != encoded
assert s == unquote_plus(encoded)
def test_quote_plus_unicode_compound():
"""
Test if a jumble of unicode, reserved and normal chars is quoted correctly
"""
s = '\u9b54\u11b3+ A5&\uaf92\u1111'
encoded = quote_plus_unicode(s)
assert s != encoded
assert s == unquote_plus(encoded)
def test_compose_magnetlink():
infohash = "DC4B96CF85A85CEEDB8ADC4B96CF85A85CEEDB8A"
name = "Some torrent name"
trackers = ['http://tracker1.example.com:8080/announce', 'http://tracker1.example.com:8080/announce']
expected_link0 = ""
expected_link1 = "magnet:?xt=urn:btih:DC4B96CF85A85CEEDB8ADC4B96CF85A85CEEDB8A"
expected_link2 = "magnet:?xt=urn:btih:DC4B96CF85A85CEEDB8ADC4B96CF85A85CEEDB8A&dn=Some+torrent+name"
expected_link3 = (
"magnet:?xt=urn:btih:DC4B96CF85A85CEEDB8ADC4B96CF85A85CEEDB8A&dn=Some+torrent+name"
"&tr=http://tracker1.example.com:8080/announce&tr=http://tracker1.example.com:8080/announce"
)
composed_link0 = compose_magnetlink(None)
composed_link1 = compose_magnetlink(infohash)
composed_link2 = compose_magnetlink(infohash, name=name)
composed_link3 = compose_magnetlink(infohash, name=name, trackers=trackers)
assert composed_link0 == expected_link0
assert composed_link1 == expected_link1
assert composed_link2 == expected_link2
assert composed_link3 == expected_link3
def test_is_dict_has():
assert not dict_item_is_any_of(None, None, None)
assert not dict_item_is_any_of({}, None, None)
d = {
'k': 'v',
'k1': 'v1'
}
assert not dict_item_is_any_of(d, 'missed_key', None)
assert not dict_item_is_any_of(d, 'missed_key', ['any_value'])
assert not dict_item_is_any_of(d, 'k', ['missed_value'])
assert not dict_item_is_any_of(d, 'k', ['missed_value', 'missed_value1'])
assert dict_item_is_any_of(d, 'k', ['v'])
assert dict_item_is_any_of(d, 'k', ['v', 'a'])
assert dict_item_is_any_of(d, 'k', ['a', 'v'])
def test_create_api_key():
x = create_api_key()
assert len(x) == 32 and bytes.fromhex(x).hex() == x
def test_format_api_key():
api_key = "abcdef"
x = format_api_key(api_key)
assert x == "abcdef"
api_key = b"abcdef"
x = format_api_key(api_key)
assert x == "abcdef"
api_key = 123
match_str = r"^Got unexpected value type of api_key from gui settings \(should be str or bytes\): int$"
with pytest.raises(ValueError, match=match_str):
format_api_key(api_key)
def METHOD_NAME():
gui_settings = MagicMock()
set_api_key(gui_settings, "abcdef")
gui_settings.setValue.assert_called_once_with("api_key", b"abcdef")
TRANSLATIONS = [
(0, '0s'),
(61, '1m 1s'),
(3800, '1h 3m'),
(110000, '1d 6h'),
(1110000, '1w 5d'),
(91110000, '2y 46w'),
(11191110000, 'Forever'),
]
@pytest.mark.parametrize('seconds, translation', TRANSLATIONS)
@patch('tribler.gui.utilities.tr', new=Mock(side_effect=lambda x: x))
def test_duration_to_string(seconds, translation):
# test if the duration_to_string function returns the correct translation for all possible formats
assert duration_to_string(seconds) == translation
def test_correct_translation():
original_string = 'original %(key1)s'
translated_string = 'translated %(key1)s'
s = TranslatedString(translated_string, original_string)
assert s % {'key1': '123'} == 'translated 123'
@patch('tribler.gui.utilities.logger.warning')
def test_missed_key_in_translated_string(warning: Mock):
original_string = 'original %(key1)s'
translated_string = 'translated %(key2)s'
s = TranslatedString(translated_string, original_string)
    # In this test, we pass the correct param 'key1', which is present in the original string but missing from the translation.
# The KeyError is intercepted, the original string is used instead of the translation, and the error is logged
# as a warning.
assert s % {'key1': '123'} == 'original 123'
warning.assert_called_once_with('KeyError: No value provided for \'key2\' in translation "translated %(key2)s", '
'original string: "original %(key1)s"')
@patch('tribler.gui.utilities.logger.warning')
def test_missed_key_in_both_translated_and_original_strings(warning: Mock):
original_string = 'original %(key1)s'
translated_string = 'translated %(key2)s'
s = TranslatedString(translated_string, original_string)
with pytest.raises(KeyError, match=r"^'key1'$"):
# In this test, we pass an incorrect param 'key3' for interpolation, and the translation
# string (with param 'key2') also differs from the original string (with param 'key1'). First,
# the translated string tries to interpolate its params and issues a warning that 'key2' is missing.
# Then, the original string tries to interpolate its params and again gets a KeyError because 'key1'
# is also missing. This second exception is propagated because the root cause of the error is
# in the outside code that passes an incorrect parameter.
_ = s % {'key3': '123'}
warning.assert_called_once_with('KeyError: No value provided for \'key2\' in translation "translated %(key2)s", '
'original string: "original %(key1)s"')
@patch("tribler.gui.utilities.get_base_path")
def test_i18n_file_path_and_languages_content(mock_get_base_path, tmp_path):
mock_get_base_path.return_value = tmp_path
filename = "languages.json"
expected_path = Path(tmp_path) / I18N_DIR / filename
assert get_i18n_file_path(filename) == expected_path
languages_json = {
"unknown": "Unknown",
"en": "English",
"nl": "Dutch"
}
language_path = get_i18n_file_path(LANGUAGES_FILE)
language_path.parents[0].mkdir(parents=True, exist_ok=True)
language_path.write_text(json.dumps(languages_json))
assert languages_json == get_languages_file_content() |
23 | test writing and reading registers | """Tests for Alazar DLL API
This suite of tests is expected to be executed on a Windows PC with a single
Alazar board installed.
"""
import gc
import logging
import os
from weakref import WeakValueDictionary
import pytest
from pytest import LogCaptureFixture
from qcodes.instrument_drivers.AlazarTech.ATS import AlazarTech_ATS
from qcodes.instrument_drivers.AlazarTech.ats_api import AlazarATSAPI
from qcodes.instrument_drivers.AlazarTech.constants import (
API_SUCCESS,
ERROR_CODES,
Capability,
)
from qcodes.instrument_drivers.AlazarTech.dll_wrapper import DllWrapperMeta
def _skip_if_alazar_dll_and_boards_not_installed():
if not os.path.exists(AlazarTech_ATS.dll_path + '.dll'):
return pytest.mark.skip(
"Alazar API DLL was not found in 'AlazarTech_ATS.dll_path'.")
return pytest.mark.skipif(
len(AlazarTech_ATS.find_boards()) != 1,
reason='None or more than one Alazar board is installed on this PC.')
pytestmark = _skip_if_alazar_dll_and_boards_not_installed()
# Set the following constants to correct values, they are used in tests below.
SYSTEM_ID = 1
BOARD_ID = 1
@pytest.fixture
def alazar():
alazar = AlazarTech_ATS('alazar', system_id=SYSTEM_ID, board_id=BOARD_ID)
yield alazar
alazar.close()
@pytest.fixture
def alazar_api():
yield AlazarATSAPI(AlazarTech_ATS.dll_path)
def test_alazar_api_singleton_behavior(caplog: LogCaptureFixture) -> None:
def using_msg(dll_path):
return f"Using existing instance for DLL path {dll_path}."
def creating_msg(dll_path):
return f"Creating new instance for DLL path {dll_path}."
assert DllWrapperMeta._instances == WeakValueDictionary()
with caplog.at_level(logging.DEBUG):
api1 = AlazarATSAPI(AlazarTech_ATS.dll_path)
assert DllWrapperMeta._instances == WeakValueDictionary(
{AlazarTech_ATS.dll_path: api1}
)
assert caplog.records[-1].message == creating_msg(AlazarTech_ATS.dll_path)
caplog.clear()
with caplog.at_level(logging.DEBUG):
api2 = AlazarATSAPI(AlazarTech_ATS.dll_path)
assert api2 is api1
assert DllWrapperMeta._instances == WeakValueDictionary(
{AlazarTech_ATS.dll_path: api1}
)
assert caplog.records[-1].message == using_msg(AlazarTech_ATS.dll_path)
caplog.clear()
# Indeed, this actually exposes a vulnerability of the setup. As far as
# LoadLibrary from ctypes is concerned, both "..\AlazarApi" and
# "..\AlazarApi.dll" would result in the same loaded library with even
# the same `_handle` value. But here we will abuse this in order to create
# a new instance of the Alazar API class by using the same DLL file.
# This should probably be fixed.
dll_path_3 = AlazarTech_ATS.dll_path + '.dll'
with caplog.at_level(logging.DEBUG):
api3 = AlazarATSAPI(dll_path_3)
assert api3 is not api1
assert api3 is not api2
assert DllWrapperMeta._instances == WeakValueDictionary(
{AlazarTech_ATS.dll_path: api1, dll_path_3: api3}
)
assert caplog.records[-1].message == creating_msg(dll_path_3)
caplog.clear()
del api2
gc.collect()
assert DllWrapperMeta._instances == WeakValueDictionary(
{AlazarTech_ATS.dll_path: api1, dll_path_3: api3}
)
del api1
gc.collect()
assert DllWrapperMeta._instances == WeakValueDictionary({dll_path_3: api3})
del api3
gc.collect()
assert DllWrapperMeta._instances == WeakValueDictionary()
def test_find_boards() -> None:
boards = AlazarTech_ATS.find_boards()
assert len(boards) == 1
assert boards[0]['system_id'] == SYSTEM_ID
assert boards[0]['board_id'] == BOARD_ID
def test_get_board_info(alazar_api) -> None:
info = AlazarTech_ATS.get_board_info(api=alazar_api,
system_id=SYSTEM_ID,
board_id=BOARD_ID)
assert {'system_id', 'board_id', 'board_kind',
'max_samples', 'bits_per_sample'} == set(list(info.keys()))
assert info['system_id'] == SYSTEM_ID
assert info['board_id'] == BOARD_ID
def test_idn(alazar) -> None:
idn = alazar.get_idn()
assert {'firmware', 'model', 'serial', 'vendor', 'CPLD_version',
'driver_version', 'SDK_version', 'latest_cal_date', 'memory_size',
'asopc_type', 'pcie_link_speed', 'pcie_link_width',
'bits_per_sample', 'max_samples'
} == set(list(idn.keys()))
assert idn['vendor'] == 'AlazarTech'
assert idn['model'][:3] == 'ATS'
def test_return_codes_are_correct(alazar_api) -> None:
"""
Test correctness of the coded return codes (success, failure, unknowns),
and consistency with what the `AlazarErrorToText` function returns.
"""
for code, msg in ERROR_CODES.items():
real_msg = alazar_api.error_to_text(code)
assert real_msg in msg
assert alazar_api.error_to_text(API_SUCCESS) == 'ApiSuccess'
lower_unknown = API_SUCCESS - 1
assert alazar_api.error_to_text(lower_unknown) == 'Unknown'
upper_unknown = max(list(ERROR_CODES.keys())) + 1
assert alazar_api.error_to_text(upper_unknown) == 'Unknown'
def test_get_channel_info_convenient(alazar) -> None:
bps, max_s = alazar.api.get_channel_info_(alazar._handle)
assert isinstance(bps, int)
assert isinstance(max_s, int)
def test_get_cpld_version_convenient(alazar) -> None:
cpld_ver = alazar.api.get_cpld_version_(alazar._handle)
assert isinstance(cpld_ver, str)
assert len(cpld_ver.split('.')) == 2
def test_get_driver_version_convenient(alazar_api) -> None:
driver_ver = alazar_api.get_driver_version_()
assert isinstance(driver_ver, str)
assert len(driver_ver.split('.')) == 3
def test_get_sdk_version_convenient(alazar_api) -> None:
sdk_ver = alazar_api.get_sdk_version_()
assert isinstance(sdk_ver, str)
assert len(sdk_ver.split('.')) == 3
def test_query_capability_convenient(alazar) -> None:
cap = Capability.GET_SERIAL_NUMBER
cap_value = alazar.api.query_capability_(alazar._handle, cap)
assert isinstance(cap_value, int)
def METHOD_NAME(alazar) -> None:
"""
The approach is to read the register that includes information about
trigger holdoff parameter, and write the same value back to the board.
"""
trigger_holdoff_register_offset = 58
orig_val = alazar._read_register(trigger_holdoff_register_offset)
alazar._write_register(trigger_holdoff_register_offset, orig_val)
def test_get_num_channels() -> None:
assert 1 == AlazarTech_ATS.get_num_channels(1)
assert 1 == AlazarTech_ATS.get_num_channels(8)
assert 2 == AlazarTech_ATS.get_num_channels(3)
assert 2 == AlazarTech_ATS.get_num_channels(10)
assert 4 == AlazarTech_ATS.get_num_channels(15)
assert 8 == AlazarTech_ATS.get_num_channels(255)
assert 16 == AlazarTech_ATS.get_num_channels(65535)
with pytest.raises(RuntimeError, match='0'):
AlazarTech_ATS.get_num_channels(0)
with pytest.raises(RuntimeError, match='17'):
AlazarTech_ATS.get_num_channels(17)
with pytest.raises(RuntimeError, match='100'):
AlazarTech_ATS.get_num_channels(100) |
24 | test empty play | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
from ansible.errors import AnsibleAssertionError, AnsibleParserError
from ansible.parsing.yaml.objects import AnsibleVaultEncryptedUnicode
from ansible.playbook.block import Block
from ansible.playbook.play import Play
from ansible.playbook.role import Role
from ansible.playbook.task import Task
from units.mock.loader import DictDataLoader
def METHOD_NAME():
p = Play.load({})
assert str(p) == ''
def test_play_with_hosts_string():
p = Play.load({'hosts': 'foo'})
assert str(p) == 'foo'
# Test the caching since self.name should be set by previous call.
assert p.get_name() == 'foo'
def test_basic_play():
p = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
connection='local',
remote_user="root",
become=True,
become_user="testing",
))
assert p.name == 'test play'
assert p.hosts == ['foo']
assert p.connection == 'local'
def test_play_with_remote_user():
p = Play.load(dict(
name="test play",
hosts=['foo'],
user="testing",
gather_facts=False,
))
assert p.remote_user == "testing"
def test_play_with_user_conflict():
play_data = dict(
name="test play",
hosts=['foo'],
user="testing",
remote_user="testing",
)
with pytest.raises(AnsibleParserError):
Play.load(play_data)
def test_play_with_bad_ds_type():
play_data = []
with pytest.raises(AnsibleAssertionError, match=r"while preprocessing data \(\[\]\), ds should be a dict but was a <(?:class|type) 'list'>"):
Play.load(play_data)
def test_play_with_tasks():
p = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
tasks=[dict(action='shell echo "hello world"')],
))
assert len(p.tasks) == 1
assert isinstance(p.tasks[0], Block)
assert p.tasks[0].has_tasks() is True
def test_play_with_handlers():
p = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
handlers=[dict(action='shell echo "hello world"')],
))
assert len(p.handlers) >= 1
assert len(p.get_handlers()) >= 1
assert isinstance(p.handlers[0], Block)
assert p.handlers[0].has_tasks() is True
def test_play_with_pre_tasks():
p = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
pre_tasks=[dict(action='shell echo "hello world"')],
))
assert len(p.pre_tasks) >= 1
assert isinstance(p.pre_tasks[0], Block)
assert p.pre_tasks[0].has_tasks() is True
assert len(p.get_tasks()) >= 1
assert isinstance(p.get_tasks()[0][0], Task)
assert p.get_tasks()[0][0].action == 'shell'
def test_play_with_post_tasks():
p = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
post_tasks=[dict(action='shell echo "hello world"')],
))
assert len(p.post_tasks) >= 1
assert isinstance(p.post_tasks[0], Block)
assert p.post_tasks[0].has_tasks() is True
def test_play_with_roles(mocker):
mocker.patch('ansible.playbook.role.definition.RoleDefinition._load_role_path', return_value=('foo', '/etc/ansible/roles/foo'))
fake_loader = DictDataLoader({
'/etc/ansible/roles/foo/tasks.yml': """
- name: role task
shell: echo "hello world"
""",
})
mock_var_manager = mocker.MagicMock()
mock_var_manager.get_vars.return_value = {}
p = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
roles=['foo'],
), loader=fake_loader, variable_manager=mock_var_manager)
blocks = p.compile()
assert len(blocks) > 1
assert all(isinstance(block, Block) for block in blocks)
assert isinstance(p.get_roles()[0], Role)
def test_play_compile():
p = Play.load(dict(
name="test play",
hosts=['foo'],
gather_facts=False,
tasks=[dict(action='shell echo "hello world"')],
))
blocks = p.compile()
# with a single block, there will still be three
# implicit meta flush_handler blocks inserted
assert len(blocks) == 4
@pytest.mark.parametrize(
'value, expected',
(
('my_vars.yml', ['my_vars.yml']),
(['my_vars.yml'], ['my_vars.yml']),
(['my_vars1.yml', 'my_vars2.yml'], ['my_vars1.yml', 'my_vars2.yml']),
(None, []),
)
)
def test_play_with_vars_files(value, expected):
play = Play.load({
'name': 'Play with vars_files',
'hosts': ['testhost1'],
'vars_files': value,
})
assert play.vars_files == value
assert play.get_vars_files() == expected
@pytest.mark.parametrize('value', ([], tuple(), set(), {}, '', None, False, 0))
def test_play_empty_hosts(value):
with pytest.raises(AnsibleParserError, match='Hosts list cannot be empty'):
Play.load({'hosts': value})
@pytest.mark.parametrize('value', ([None], (None,), ['one', None]))
def test_play_none_hosts(value):
with pytest.raises(AnsibleParserError, match="Hosts list cannot contain values of 'None'"):
Play.load({'hosts': value})
@pytest.mark.parametrize(
'value',
(
{'one': None},
{'one': 'two'},
True,
1,
1.75,
AnsibleVaultEncryptedUnicode('secret'),
)
)
def test_play_invalid_hosts_sequence(value):
with pytest.raises(AnsibleParserError, match='Hosts list must be a sequence or string'):
Play.load({'hosts': value})
@pytest.mark.parametrize(
'value',
(
[[1, 'two']],
[{'one': None}],
[set((None, 'one'))],
['one', 'two', {'three': None}],
['one', 'two', {'three': 'four'}],
[AnsibleVaultEncryptedUnicode('secret')],
)
)
def test_play_invalid_hosts_value(value):
with pytest.raises(AnsibleParserError, match='Hosts list contains an invalid host value'):
Play.load({'hosts': value})
def test_play_with_vars():
play = Play.load({}, vars={'var1': 'val1'})
assert play.get_name() == ''
assert play.vars == {'var1': 'val1'}
assert play.get_vars() == {'var1': 'val1'}
def test_play_no_name_hosts_sequence():
play = Play.load({'hosts': ['host1', 'host2']})
assert play.get_name() == 'host1,host2'
def test_play_hosts_template_expression():
play = Play.load({'hosts': "{{ target_hosts }}"})
assert play.get_name() == '{{ target_hosts }}'
@pytest.mark.parametrize(
'call',
(
'_load_tasks',
'_load_pre_tasks',
'_load_post_tasks',
'_load_handlers',
'_load_roles',
)
)
def test_bad_blocks_roles(mocker, call):
mocker.patch('ansible.playbook.play.load_list_of_blocks', side_effect=AssertionError('Raised intentionally'))
mocker.patch('ansible.playbook.play.load_list_of_roles', side_effect=AssertionError('Raised intentionally'))
play = Play.load({})
with pytest.raises(AnsibleParserError, match='A malformed (block|(role declaration)) was encountered'):
getattr(play, call)('', None) |
25 | roi align common | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-nested-blocks
"Roi align in python"
import math
import numpy as np
def _bilinear(a_np, n, c, y, x, height, width, layout):
if y < -1 or y > height or x < -1 or x > width:
return 0
y = min(max(y, 0), height - 1)
x = min(max(x, 0), width - 1)
y_low = int(math.floor(y))
x_low = int(math.floor(x))
y_high = y_low + 1
x_high = x_low + 1
wy_h = y - y_low
wx_h = x - x_low
wy_l = 1 - wy_h
wx_l = 1 - wx_h
val = 0
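# Accumulate the bilinear interpolation: each of the four neighbouring pixels
# contributes its value weighted by the product of the horizontal and vertical
# fractional weights computed above (wx_l/wx_h and wy_l/wy_h).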
for wx, xp in zip((wx_l, wx_h), (x_low, x_high)):
for wy, yp in zip((wy_l, wy_h), (y_low, y_high)):
if 0 <= yp < height and 0 <= xp < width:
if layout == "NCHW":
val += wx * wy * a_np[n, c, yp, xp]
else:
val += wx * wy * a_np[n, yp, xp, c]
return val
def METHOD_NAME(
a_np,
b_np,
rois_np,
channel,
pooled_size_h,
pooled_size_w,
spatial_scale,
sample_ratio,
avg_mode,
max_mode,
height,
width,
layout,
):
"""Common code used by roi align NCHW and NHWC"""
num_roi = rois_np.shape[0]
for i in range(num_roi):
roi = rois_np[i]
batch_index = int(roi[0])
roi_start_w, roi_start_h, roi_end_w, roi_end_h = roi[1:] * spatial_scale
roi_h = max(roi_end_h - roi_start_h, 1.0)
roi_w = max(roi_end_w - roi_start_w, 1.0)
bin_h = roi_h / pooled_size_h
bin_w = roi_w / pooled_size_w
if sample_ratio > 0:
roi_bin_grid_h = roi_bin_grid_w = int(sample_ratio)
else:
roi_bin_grid_h = int(math.ceil(roi_h / pooled_size_h))
roi_bin_grid_w = int(math.ceil(roi_w / pooled_size_w))
count = roi_bin_grid_h * roi_bin_grid_w
for c in range(channel):
for ph in range(pooled_size_h):
for pw in range(pooled_size_w):
if avg_mode:
total = 0.0
if max_mode:
total = float("-inf")
for iy in range(roi_bin_grid_h):
for ix in range(roi_bin_grid_w):
y = roi_start_h + ph * bin_h + (iy + 0.5) * bin_h / roi_bin_grid_h
x = roi_start_w + pw * bin_w + (ix + 0.5) * bin_w / roi_bin_grid_w
if avg_mode:
total += (
_bilinear(a_np, batch_index, c, y, x, height, width, layout)
/ count
)
if max_mode:
total = max(
total,
_bilinear(a_np, batch_index, c, y, x, height, width, layout),
)
if layout == "NCHW":
b_np[i, c, ph, pw] = total
else:
b_np[i, ph, pw, c] = total
return b_np
def roi_align_nchw_python(a_np, rois_np, pooled_size, spatial_scale, sample_ratio, mode=b"avg"):
"""Roi align NCHW in python"""
avg_mode = mode in (b"avg", "avg", 0)
max_mode = mode in (b"max", "max", 1)
assert avg_mode or max_mode, "Mode must be average or max. Please pass a valid mode."
_, channel, height, width = a_np.shape
if isinstance(pooled_size, int):
pooled_size_h = pooled_size_w = pooled_size
else:
pooled_size_h, pooled_size_w = pooled_size
b_np = np.zeros((rois_np.shape[0], channel, pooled_size_h, pooled_size_w), dtype=a_np.dtype)
return METHOD_NAME(
a_np,
b_np,
rois_np,
channel,
pooled_size_h,
pooled_size_w,
spatial_scale,
sample_ratio,
avg_mode,
max_mode,
height,
width,
"NCHW",
)
def roi_align_nhwc_python(a_np, rois_np, pooled_size, spatial_scale, sample_ratio, mode=b"avg"):
"""Roi align NHWC in python"""
avg_mode = mode in (b"avg", "avg", 0)
max_mode = mode in (b"max", "max", 1)
assert avg_mode or max_mode, "Mode must be average or max. Please pass a valid mode."
_, height, width, channel = a_np.shape
num_roi = rois_np.shape[0]
if isinstance(pooled_size, int):
pooled_size_h = pooled_size_w = pooled_size
else:
pooled_size_h, pooled_size_w = pooled_size
b_np = np.zeros((num_roi, pooled_size_h, pooled_size_w, channel), dtype=a_np.dtype)
return METHOD_NAME(
a_np,
b_np,
rois_np,
channel,
pooled_size_h,
pooled_size_w,
spatial_scale,
sample_ratio,
avg_mode,
max_mode,
height,
width,
"NHWC",
) |
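# Illustrative usage sketch (not part of the original module; shapes and values are
# hypothetical): run ROI align on a small random NCHW tensor with a single ROI that
# covers the whole 8x8 feature map.
example_feat = np.random.rand(1, 3, 8, 8).astype("float32")
example_rois = np.array([[0, 0.0, 0.0, 7.0, 7.0]], dtype="float32")  # (batch_index, w0, h0, w1, h1)
example_out = roi_align_nchw_python(example_feat, example_rois, pooled_size=2, spatial_scale=1.0, sample_ratio=2)
# example_out has shape (num_rois, channels, pooled_h, pooled_w) == (1, 3, 2, 2).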
26 | test main | """Test suite for the cProfile module."""
import sys
from test.test_support import run_unittest, TESTFN, unlink
# rip off all interesting stuff from test_profile
import cProfile
from test.test_profile import ProfileTest, regenerate_expected_output
class CProfileTest(ProfileTest):
profilerclass = cProfile.Profile
expected_list_sort_output = "{method 'sort' of 'list' objects}"
# Issue 3895.
def test_bad_counter_during_dealloc(self):
import _lsprof
# Must use a file as StringIO doesn't trigger the bug.
sys.stderr = open(TESTFN, 'w')
try:
obj = _lsprof.Profiler(lambda: int)
obj.enable()
obj = _lsprof.Profiler(1)
obj.disable()
finally:
sys.stderr = sys.__stderr__
unlink(TESTFN)
def METHOD_NAME():
run_unittest(CProfileTest)
def main():
if '-r' not in sys.argv:
METHOD_NAME()
else:
regenerate_expected_output(__file__, CProfileTest)
# Don't remove this comment. Everything below it is auto-generated.
#--cut--------------------------------------------------------------------------
CProfileTest.expected_output['print_stats'] = """\
126 function calls (106 primitive calls) in 1.000 seconds
Ordered by: standard name
ncalls tottime percall cumtime percall filename:lineno(function)
1 0.000 0.000 1.000 1.000 <string>:1(<module>)
28 0.028 0.001 0.028 0.001 profilee.py:110(__getattr__)
1 0.270 0.270 1.000 1.000 profilee.py:25(testfunc)
23/3 0.150 0.007 0.170 0.057 profilee.py:35(factorial)
20 0.020 0.001 0.020 0.001 profilee.py:48(mul)
2 0.040 0.020 0.600 0.300 profilee.py:55(helper)
4 0.116 0.029 0.120 0.030 profilee.py:73(helper1)
2 0.000 0.000 0.140 0.070 profilee.py:84(helper2_indirect)
8 0.312 0.039 0.400 0.050 profilee.py:88(helper2)
8 0.064 0.008 0.080 0.010 profilee.py:98(subhelper)
12 0.000 0.000 0.012 0.001 {hasattr}
4 0.000 0.000 0.000 0.000 {method 'append' of 'list' objects}
1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects}
8 0.000 0.000 0.000 0.000 {range}
4 0.000 0.000 0.000 0.000 {sys.exc_info}
"""
CProfileTest.expected_output['print_callers'] = """\
Ordered by: standard name
Function was called by...
ncalls tottime cumtime
<string>:1(<module>) <-
profilee.py:110(__getattr__) <- 16 0.016 0.016 profilee.py:98(subhelper)
12 0.012 0.012 {hasattr}
profilee.py:25(testfunc) <- 1 0.270 1.000 <string>:1(<module>)
profilee.py:35(factorial) <- 1 0.014 0.130 profilee.py:25(testfunc)
20/3 0.130 0.147 profilee.py:35(factorial)
2 0.006 0.040 profilee.py:84(helper2_indirect)
profilee.py:48(mul) <- 20 0.020 0.020 profilee.py:35(factorial)
profilee.py:55(helper) <- 2 0.040 0.600 profilee.py:25(testfunc)
profilee.py:73(helper1) <- 4 0.116 0.120 profilee.py:55(helper)
profilee.py:84(helper2_indirect) <- 2 0.000 0.140 profilee.py:55(helper)
profilee.py:88(helper2) <- 6 0.234 0.300 profilee.py:55(helper)
2 0.078 0.100 profilee.py:84(helper2_indirect)
profilee.py:98(subhelper) <- 8 0.064 0.080 profilee.py:88(helper2)
{hasattr} <- 4 0.000 0.004 profilee.py:73(helper1)
8 0.000 0.008 profilee.py:88(helper2)
{method 'append' of 'list' objects} <- 4 0.000 0.000 profilee.py:73(helper1)
{method 'disable' of '_lsprof.Profiler' objects} <-
{range} <- 8 0.000 0.000 profilee.py:98(subhelper)
{sys.exc_info} <- 4 0.000 0.000 profilee.py:73(helper1)
"""
CProfileTest.expected_output['print_callees'] = """\
Ordered by: standard name
Function called...
ncalls tottime cumtime
<string>:1(<module>) -> 1 0.270 1.000 profilee.py:25(testfunc)
profilee.py:110(__getattr__) ->
profilee.py:25(testfunc) -> 1 0.014 0.130 profilee.py:35(factorial)
2 0.040 0.600 profilee.py:55(helper)
profilee.py:35(factorial) -> 20/3 0.130 0.147 profilee.py:35(factorial)
20 0.020 0.020 profilee.py:48(mul)
profilee.py:48(mul) ->
profilee.py:55(helper) -> 4 0.116 0.120 profilee.py:73(helper1)
2 0.000 0.140 profilee.py:84(helper2_indirect)
6 0.234 0.300 profilee.py:88(helper2)
profilee.py:73(helper1) -> 4 0.000 0.004 {hasattr}
4 0.000 0.000 {method 'append' of 'list' objects}
4 0.000 0.000 {sys.exc_info}
profilee.py:84(helper2_indirect) -> 2 0.006 0.040 profilee.py:35(factorial)
2 0.078 0.100 profilee.py:88(helper2)
profilee.py:88(helper2) -> 8 0.064 0.080 profilee.py:98(subhelper)
8 0.000 0.008 {hasattr}
profilee.py:98(subhelper) -> 16 0.016 0.016 profilee.py:110(__getattr__)
8 0.000 0.000 {range}
{hasattr} -> 12 0.012 0.012 profilee.py:110(__getattr__)
{method 'append' of 'list' objects} ->
{method 'disable' of '_lsprof.Profiler' objects} ->
{range} ->
{sys.exc_info} ->
"""
if __name__ == "__main__":
main() |
27 | get statistics | # -*- coding: utf-8 -*-
#
# http://www.privacyidea.org
#
# 2018-08-01 Cornelius Kölbel, <cornelius.koelbel@netknights.it>
# Initial writeup
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
This endpoint is used fetch monitoring/statistics data
The code of this module is tested in tests/test_api_monitoring.py
"""
from flask import (Blueprint, request)
from privacyidea.api.lib.utils import getParam, send_result
from privacyidea.api.lib.prepolicy import prepolicy, check_base_action
from privacyidea.lib.utils import parse_legacy_time
from privacyidea.lib.log import log_with
from privacyidea.lib.monitoringstats import (get_stats_keys, get_values,
get_last_value, delete_stats)
from privacyidea.lib.tokenclass import AUTH_DATE_FORMAT
from flask import g
import logging
from privacyidea.lib.policy import ACTION
log = logging.getLogger(__name__)
monitoring_blueprint = Blueprint('monitoring_blueprint', __name__)
@monitoring_blueprint.route('/', methods=['GET'])
@monitoring_blueprint.route('/<stats_key>', methods=['GET'])
@log_with(log)
@prepolicy(check_base_action, request, ACTION.STATISTICSREAD)
def METHOD_NAME(stats_key=None):
"""
Return a list of all available statistics keys in the database if no *stats_key*
is specified.
If a *stats_key* is specified, return the data of this key.
The parameters "start" and "end" can be used to specify a time window
from which the statistics data should be fetched.
"""
if stats_key is None:
stats_keys = get_stats_keys()
g.audit_object.log({"success": True})
return send_result(stats_keys)
else:
param = request.all_data
start = getParam(param, "start")
if start:
start = parse_legacy_time(start, return_date=True)
end = getParam(param, "end")
if end:
end = parse_legacy_time(end, return_date=True)
values = get_values(stats_key=stats_key, start_timestamp=start, end_timestamp=end)
# convert timestamps to strings
values_w_string = [(s[0].strftime(AUTH_DATE_FORMAT), s[1]) for s in values]
g.audit_object.log({"success": True})
return send_result(values_w_string)
@monitoring_blueprint.route('/<stats_key>', methods=['DELETE'])
@log_with(log)
@prepolicy(check_base_action, request, ACTION.STATISTICSDELETE)
def delete_statistics(stats_key):
"""
Delete the statistics data of a certain stats_key.
You can specify the start date and the end date when to delete the
monitoring data.
You should specify the dates including the timezone. Otherwise your client
could send its local time and the server would interpret it as its own local
time, which would result in deleting unexpected entries.
You can specify the dates like 2010-12-31 22:00+0200
"""
param = request.all_data
start = getParam(param, "start")
if start:
start = parse_legacy_time(start, return_date=True)
end = getParam(param, "end")
if end:
end = parse_legacy_time(end, return_date=True)
r = delete_stats(stats_key, start, end)
g.audit_object.log({"success": True})
return send_result(r)
@monitoring_blueprint.route('/<stats_key>/last', methods=['GET'])
@log_with(log)
@prepolicy(check_base_action, request, ACTION.STATISTICSREAD)
def get_statistics_last(stats_key):
"""
Get the last value of the stats key
"""
last_value = get_last_value(stats_key)
g.audit_object.log({"success": True})
return send_result(last_value)
|
28 | on attachment revision post save | import os
from django.conf import settings as django_settings
from django.db import models
from django.db.models import signals
from django.utils.translation import gettext
from django.utils.translation import gettext_lazy as _
from wiki import managers
from wiki.decorators import disable_signal_for_loaddata
from wiki.models.article import BaseRevisionMixin
from wiki.models.pluginbase import ReusablePlugin
from . import settings
class IllegalFileExtension(Exception):
"""File extension on upload is not allowed"""
pass
class Attachment(ReusablePlugin):
objects = managers.ArticleFkManager()
current_revision = models.OneToOneField(
"AttachmentRevision",
verbose_name=_("current revision"),
blank=True,
null=True,
related_name="current_set",
on_delete=models.CASCADE,
help_text=_(
"The revision of this attachment currently in use (on all articles using the attachment)"
),
)
original_filename = models.CharField(
max_length=256, verbose_name=_("original filename"), blank=True, null=True
)
def can_write(self, user):
if not settings.ANONYMOUS and (not user or user.is_anonymous):
return False
return ReusablePlugin.can_write(self, user)
def can_delete(self, user):
return self.can_write(user)
class Meta:
verbose_name = _("attachment")
verbose_name_plural = _("attachments")
# Matches label of upcoming 0.1 release
db_table = "wiki_attachments_attachment"
def __str__(self):
from wiki.models import Article
try:
return "%s: %s" % (
self.article.current_revision.title,
self.original_filename,
)
except Article.DoesNotExist:
return "Attachment for non-existing article"
def extension_allowed(filename):
try:
extension = filename.split(".")[-1]
except IndexError:
# No extension
raise IllegalFileExtension(
gettext("No file extension found in filename. That's not okay!")
)
if not extension.lower() in map(lambda x: x.lower(), settings.FILE_EXTENSIONS):
raise IllegalFileExtension(
gettext(
"The following filename is illegal: {filename:s}. Extension "
"has to be one of {extensions:s}"
).format(filename=filename, extensions=", ".join(settings.FILE_EXTENSIONS))
)
return extension
def upload_path(instance, filename):
extension = extension_allowed(filename)
# Has to match original extension filename
if instance.id and instance.attachment and instance.attachment.original_filename:
original_extension = instance.attachment.original_filename.split(".")[-1]
if not extension.lower() == original_extension:
raise IllegalFileExtension(
"File extension has to be '%s', not '%s'."
% (original_extension, extension.lower())
)
elif instance.attachment:
instance.attachment.original_filename = filename
upload_path = settings.UPLOAD_PATH
upload_path = upload_path.replace("%aid", str(instance.attachment.article.id))
if settings.UPLOAD_PATH_OBSCURIFY:
import random
import hashlib
m = hashlib.md5(str(random.randint(0, 100000000000000)).encode("ascii"))
upload_path = os.path.join(upload_path, m.hexdigest())
if settings.APPEND_EXTENSION:
filename += ".upload"
return os.path.join(upload_path, filename)
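# Illustrative sketch of the resulting layout (the UPLOAD_PATH value is hypothetical):
# with UPLOAD_PATH = "wiki/attachments/%aid", article id 7, UPLOAD_PATH_OBSCURIFY = True
# and APPEND_EXTENSION = True, a file "report.pdf" is stored at something like
# "wiki/attachments/7/<32-char md5 hexdigest>/report.pdf.upload".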
class AttachmentRevision(BaseRevisionMixin, models.Model):
attachment = models.ForeignKey("Attachment", on_delete=models.CASCADE)
file = models.FileField(
upload_to=upload_path, # @ReservedAssignment
max_length=255,
verbose_name=_("file"),
storage=settings.STORAGE_BACKEND,
)
description = models.TextField(blank=True)
class Meta:
verbose_name = _("attachment revision")
verbose_name_plural = _("attachment revisions")
ordering = ("created",)
get_latest_by = "revision_number"
# Matches label of upcoming 0.1 release
db_table = "wiki_attachments_attachmentrevision"
def get_filename(self):
"""Used to retrieve the filename of a revision.
But attachment.original_filename should always be used in the frontend
such that filenames stay consistent."""
# TODO: Perhaps we can let file names change when files are replaced?
if not self.file:
return None
filename = self.file.name.split("/")[-1]
return ".".join(filename.split(".")[:-1])
def get_size(self):
"""Used to retrieve the file size and not cause exceptions."""
try:
return self.file.size
except (ValueError, OSError):
return None
def __str__(self):
return "%s: %s (r%d)" % (
self.attachment.article.current_revision.title,
self.attachment.original_filename,
self.revision_number,
)
@disable_signal_for_loaddata
def on_revision_delete(instance, *args, **kwargs):
if not instance.file:
return
# Remove file
path = instance.file.path.split("/")[:-1]
instance.file.delete(save=False)
# Clean up empty directories
# Check for empty folders in the path. Delete the first two.
max_depth = 1
if len(path) != 0:
if len(path[-1]) == 32:
# Path was (most likely) obscurified so we should look 2 levels down
max_depth = 2
for depth in range(0, max_depth):
delete_path = "/".join(path[:-depth] if depth > 0 else path)
try:
if (
len(os.listdir(os.path.join(django_settings.MEDIA_ROOT, delete_path)))
== 0
):
os.rmdir(delete_path)
except OSError:
# Raised by os.listdir if directory is missing
pass
@disable_signal_for_loaddata
def on_attachment_revision_pre_save(**kwargs):
instance = kwargs["instance"]
if instance._state.adding:
update_previous_revision = (
not instance.previous_revision
and instance.attachment
and instance.attachment.current_revision
and instance.attachment.current_revision != instance
)
if update_previous_revision:
instance.previous_revision = instance.attachment.current_revision
if not instance.revision_number:
try:
previous_revision = instance.attachment.attachmentrevision_set.latest()
instance.revision_number = previous_revision.revision_number + 1
# NB! The above should not raise the below exception, but somehow
# it does.
except (AttachmentRevision.DoesNotExist, Attachment.DoesNotExist):
instance.revision_number = 1
@disable_signal_for_loaddata
def METHOD_NAME(**kwargs):
instance = kwargs["instance"]
if not instance.attachment.current_revision:
# If I'm saved from Django admin, then article.current_revision is
# me!
instance.attachment.current_revision = instance
instance.attachment.save()
signals.pre_delete.connect(on_revision_delete, AttachmentRevision)
signals.pre_save.connect(on_attachment_revision_pre_save, AttachmentRevision)
signals.post_save.connect(METHOD_NAME, AttachmentRevision) |
29 | do all | """
Base class for TIM plugin server. THIS IS DEPRECATED, DO NOT USE IN NEW CODE!
Serving from local port 5000.
"""
import http.server
import json
import logging
import os
import socketserver
from tim_common.fileParams import (
get_template,
file_to_string,
do_headers,
multi_post_params,
get_param,
QueryClass,
get_params,
post_params,
)
PORT = 5000
PROGDIR = "."
class TimServer(http.server.BaseHTTPRequestHandler):
"""Base class for TIM-server. THIS IS DEPRECATED, DO NOT USE IN NEW CODE!"""
def __init__(self, request, client_address, _server):
super().__init__(request, client_address, _server)
self.user_id = "--"
def do_OPTIONS(self):
"""Do needed things for OPTIONS request.
:return: nothing
"""
print("do_OPTIONS ==============================================")
do_headers(self, "text/plain")
print(self.path)
print(self.headers)
def do_GET(self):
"""Do needed things for GET request.
:return: nothing
"""
# print("do_GET ==================================================")
if self.path.find("/reqs") >= 0:
return self.do_reqs()
if self.path.find("/favicon.ico") >= 0:
return self.send_response(404)
if self.path.find("/template") >= 0:
return self.send_text(self.do_template(get_params(self)), "text/plain")
fname = self.path.split("?")[0]
if fname.find(".css") >= 0:
return self.send_text_file(fname, "css", "text/css")
if fname.find(".js") >= 0:
return self.send_text_file(fname, "js", "application/javascript")
if fname.find(".html") >= 0:
return self.send_text_file(fname, "html", "text/html")
return self.METHOD_NAME(get_params(self))
def do_POST(self):
"""Do needed things for POST request This may be a f.ex a request single html-plugin or multiple plugins.
:return: nothing
"""
# print("do_POST =================================================")
if self.path.find("/multihtml") < 0:
return self.METHOD_NAME(post_params(self))
print("do_POST MULTIHTML ==========================================")
queries = multi_post_params(self)
do_headers(self, "application/json")
htmls = []
self.user_id = get_param(queries[0], "user_id", "--")
print("UserId:", self.user_id)
log(self)
# print(queries)
for query in queries:
# print(query.jso)
# print(str(query))
s = self.get_html(query)
# print(s)
htmls.append(s)
# print(htmls)
sresult = json.dumps(htmls)
self.wout(sresult + "\n")
log(self)  # to measure the time spent doing all the html
def do_PUT(self):
"""Do needed things for PUT request.
:return: nothing
"""
# print("do_PUT =================================================")
self.METHOD_NAME(post_params(self))
def wout(self, s: str):
"""Write s to servers output stream as UTF8.
:rtype : object
:param s: string to write
:return: nothing
"""
self.wfile.write(s.encode("UTF-8"))
def send_text_file(self, name: str, ftype: str, content_type: str):
"""Sends a file to server from directory ftype with content_type.
:param name: files name part, possible extra directories
:param ftype: files type (js, html, css), specifies also the directory where to get the file
:param content_type: files_content type
:return: nothing
"""
# fname = re.sub(".*/", "", name)
fname = os.path.basename(name)
do_headers(self, content_type)
return self.wout(file_to_string(ftype + "/" + fname))
def send_text(self, txt: str, content_type: str):
"""Sends a txt to server.
:param txt: text to send
:param content_type: files_content type
:return: nothing
"""
# fname = re.sub(".*/", "", name)
do_headers(self, content_type)
return self.wout(txt)
def get_html(self, query: QueryClass) -> str:
"""Return the html for this query. Params are dumbed as hexstring to avoid problems with html input and so on.
:rtype : str
:param query: get or put params
:return : html string for this markup
"""
return ""
def get_reqs_result(self) -> dict:
"""
:return: reqs result as json
"""
return {}
def do_reqs(self):
"""Answer to /reqs route.
:type self: TimServer
"""
do_headers(self, "application/json")
result_json = self.get_reqs_result()
result_str = json.dumps(result_json)
return self.wout(result_str)
def do_template(self, query: QueryClass):
"""Gets a template.
:rtype : str
:param query: get or put params
:return: template result as json
"""
tempfile = get_param(query, "file", "")
tidx = get_param(query, "idx", "0")
return get_template("templates", tidx, tempfile)
def METHOD_NAME(self, query: QueryClass):
"""Do all other routes.
:param query: post and get params
:return: nothing
"""
if self.path.find("/html") >= 0:
do_headers(self, "text/html; charset=utf-8")
s = self.get_html(query)
return self.wout(s)
if self.path.find("/answer") >= 0:
return self.do_answer(query)
do_headers(self, "text/plain")
return self.wout("Unknown query: " + self.path)
def do_answer(self, query: QueryClass):
"""Do answer route.
:param query: post and get params
:return: nothing
"""
def log(request: TimServer):
"""Log the time and user.
:param request:
:return: Nothing
"""
agent = " :AG: " + request.headers["User-Agent"]
if agent.find("ython") >= 0:
agent = ""
logging.info(request.path + agent + " u:" + request.user_id)
# When debugging on Windows, ThreadingMixIn has to be used instead.
# If ThreadingMixIn is run on Linux, does chdir change the directory for everyone?
# The problem has been worked around so that all run commands receive prgpath for their own use.
# if __debug__:
# if True:
class ThreadedHTTPServer(socketserver.ThreadingMixIn, http.server.HTTPServer):
"""Handle requests in a separate thread."""
print("Debug mode/ThreadingMixIn")
# else:
# class ThreadedHTTPServer(socketserver.ForkingMixIn, http.server.HTTPServer):
# """Handle requests in a separate thread."""
# print("Normal mode/ForkingMixIn")
def start_server(http_server):
if not os.path.exists("/var/log"):
os.makedirs("/var/log")
# Logging to file is disabled for now because Docker redirects stdout to an internal JSON file automatically
# and setting ownership to volumes via Docker is not possible.
# logging.basicConfig(filename='/var/log/' + logname + '.log', level=logging.INFO, format='%(asctime)s %(message)s')
logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")
server = ThreadedHTTPServer(("", PORT), http_server)
print("Starting server, use <Ctrl-C> to stop")
logging.info("Starting server")
server.serve_forever() |
30 | forward | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
from fairseq import checkpoint_utils
from fairseq.models import (
register_model,
register_model_architecture,
)
from fairseq.models.speech_to_text import (
ConvTransformerModel,
convtransformer_espnet,
ConvTransformerEncoder,
)
from fairseq.models.speech_to_text.modules.augmented_memory_attention import (
augmented_memory,
SequenceEncoder,
AugmentedMemoryConvTransformerEncoder,
)
from torch import nn, Tensor
from typing import Dict, List
from fairseq.models.speech_to_text.modules.emformer import NoSegAugmentedMemoryTransformerEncoderLayer
@register_model("convtransformer_simul_trans")
class SimulConvTransformerModel(ConvTransformerModel):
"""
Implementation of the paper:
SimulMT to SimulST: Adapting Simultaneous Text Translation to
End-to-End Simultaneous Speech Translation
https://www.aclweb.org/anthology/2020.aacl-main.58.pdf
"""
@staticmethod
def add_args(parser):
super(SimulConvTransformerModel, SimulConvTransformerModel).add_args(parser)
parser.add_argument(
"--train-monotonic-only",
action="store_true",
default=False,
help="Only train monotonic attention",
)
@classmethod
def build_decoder(cls, args, task, embed_tokens):
tgt_dict = task.tgt_dict
from examples.simultaneous_translation.models.transformer_monotonic_attention import (
TransformerMonotonicDecoder,
)
decoder = TransformerMonotonicDecoder(args, tgt_dict, embed_tokens)
if getattr(args, "load_pretrained_decoder_from", None):
decoder = checkpoint_utils.load_pretrained_component_from_model(
component=decoder, checkpoint=args.load_pretrained_decoder_from
)
return decoder
@register_model_architecture(
"convtransformer_simul_trans", "convtransformer_simul_trans_espnet"
)
def convtransformer_simul_trans_espnet(args):
convtransformer_espnet(args)
@register_model("convtransformer_augmented_memory")
@augmented_memory
class AugmentedMemoryConvTransformerModel(SimulConvTransformerModel):
@classmethod
def build_encoder(cls, args):
encoder = SequenceEncoder(args, AugmentedMemoryConvTransformerEncoder(args))
if getattr(args, "load_pretrained_encoder_from", None) is not None:
encoder = checkpoint_utils.load_pretrained_component_from_model(
component=encoder, checkpoint=args.load_pretrained_encoder_from
)
return encoder
@register_model_architecture(
"convtransformer_augmented_memory", "convtransformer_augmented_memory"
)
def augmented_memory_convtransformer_espnet(args):
convtransformer_espnet(args)
# ============================================================================ #
# Convtransformer
# with monotonic attention decoder
# with emformer encoder
# ============================================================================ #
class ConvTransformerEmformerEncoder(ConvTransformerEncoder):
def __init__(self, args):
super().__init__(args)
stride = self.conv_layer_stride(args)
trf_left_context = args.segment_left_context // stride
trf_right_context = args.segment_right_context // stride
context_config = [trf_left_context, trf_right_context]
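# The segment contexts appear to be given in input frames; dividing by the convolutional
# subsampling stride (presumed to be 4 here) converts them to encoder time steps.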
self.transformer_layers = nn.ModuleList(
[
NoSegAugmentedMemoryTransformerEncoderLayer(
input_dim=args.encoder_embed_dim,
num_heads=args.encoder_attention_heads,
ffn_dim=args.encoder_ffn_embed_dim,
num_layers=args.encoder_layers,
dropout_in_attn=args.dropout,
dropout_on_attn=args.dropout,
dropout_on_fc1=args.dropout,
dropout_on_fc2=args.dropout,
activation_fn=args.activation_fn,
context_config=context_config,
segment_size=args.segment_length,
max_memory_size=args.max_memory_size,
scaled_init=True, # TODO: use constant for now.
tanh_on_mem=args.amtrf_tanh_on_mem,
)
]
)
self.conv_transformer_encoder = ConvTransformerEncoder(args)
def METHOD_NAME(self, src_tokens, src_lengths):
encoder_out: Dict[str, List[Tensor]] = self.conv_transformer_encoder(src_tokens, src_lengths.to(src_tokens.device))
output = encoder_out["encoder_out"][0]
encoder_padding_masks = encoder_out["encoder_padding_mask"]
return {
"encoder_out": [output],
# This is because in the original implementation
# the output didn't consider the last segment as right context.
"encoder_padding_mask": [encoder_padding_masks[0][:, : output.size(0)]] if len(encoder_padding_masks) > 0
else [],
"encoder_embedding": [],
"encoder_states": [],
"src_tokens": [],
"src_lengths": [],
}
@staticmethod
def conv_layer_stride(args):
# TODO: make it configurable from the args
return 4
@register_model("convtransformer_emformer")
class ConvtransformerEmformer(SimulConvTransformerModel):
@staticmethod
def add_args(parser):
super(ConvtransformerEmformer, ConvtransformerEmformer).add_args(parser)
parser.add_argument(
"--segment-length",
type=int,
metavar="N",
help="length of each segment (not including left context / right context)",
)
parser.add_argument(
"--segment-left-context",
type=int,
help="length of left context in a segment",
)
parser.add_argument(
"--segment-right-context",
type=int,
help="length of right context in a segment",
)
parser.add_argument(
"--max-memory-size",
type=int,
default=-1,
help="Right context for the segment.",
)
parser.add_argument(
"--amtrf-tanh-on-mem",
default=False,
action="store_true",
help="whether to use tanh on memory vector",
)
@classmethod
def build_encoder(cls, args):
encoder = ConvTransformerEmformerEncoder(args)
if getattr(args, "load_pretrained_encoder_from", None):
encoder = checkpoint_utils.load_pretrained_component_from_model(
component=encoder, checkpoint=args.load_pretrained_encoder_from
)
return encoder
@register_model_architecture(
"convtransformer_emformer",
"convtransformer_emformer",
)
def convtransformer_emformer_base(args):
convtransformer_espnet(args) |
31 | test write user to vault skipped | from collections.abc import Callable
import pytest
from pytest_mock import MockerFixture
import reconcile.terraform_users as integ
from reconcile.gql_definitions.common.pgp_reencryption_settings import (
PgpReencryptionSettingsQueryData,
)
from reconcile.terraform_users import (
send_email_invites,
write_user_to_vault,
)
from reconcile.utils.gql import GqlApi
@pytest.fixture
def new_users() -> list[tuple[str, str, str, str]]:
return [
(
"aws1",
"https://console.aws.amazon.com",
"user1",
"enc_password1",
), # gitleaks:allow
]
def test_write_user_to_vault(mocker, new_users):
vm = mocker.patch("reconcile.terraform_users._VaultClient", autospec=True)
write_user_to_vault(vm, "test", new_users, [])
vm.write.assert_called_once_with(
{
"path": "test/aws1_user1",
"data": {
"account": "aws1",
"user_name": "user1",
"console_url": "https://console.aws.amazon.com",
"encrypted_password": "enc_password1", # gitleaks:allow
},
},
decode_base64=False,
)
def METHOD_NAME(mocker, new_users):
vm = mocker.patch("reconcile.terraform_users._VaultClient", autospec=True)
write_user_to_vault(vm, "test", new_users, ["aws1"])
vm.write.assert_not_called()
def test_send_email_invites(mocker, new_users):
sm = mocker.patch("reconcile.terraform_users.SmtpClient", autospec=True)
send_email_invites(new_users, sm, ["aws1"])
sm.send_mails.assert_called_once()
def test_send_email_invites_skip(mocker, new_users):
sm = mocker.patch("reconcile.terraform_users.SmtpClient", autospec=True)
send_email_invites(new_users, sm, [])
sm.send_mails.assert_not_called()
@pytest.fixture
def pgp_reencryption_settings(
gql_class_factory: Callable[..., PgpReencryptionSettingsQueryData],
) -> PgpReencryptionSettingsQueryData:
return gql_class_factory(
PgpReencryptionSettingsQueryData,
{
"pgp_reencryption_settings": [],
},
)
@pytest.fixture
def test_aws_account_role() -> dict:
return {
"name": "test_aws_account",
"users": [{"name": "test-user"}],
"aws_groups": [
{
"name": "test-group",
"account": {
"name": "test-account",
},
}
],
"user_policies": [
{
"name": "test-policy",
"account": {
"name": "test-account",
},
},
],
}
@pytest.fixture
def test_aws_account() -> dict:
return {
"name": "test-account",
}
def test_setup(
mocker: MockerFixture,
test_aws_account: dict,
test_aws_account_role: dict,
gql_api_builder: Callable[..., GqlApi],
) -> None:
mocked_gql_api = gql_api_builder({"roles": [test_aws_account_role]})
mocker.patch("reconcile.terraform_users.gql").get_api.return_value = mocked_gql_api
mocked_queries = mocker.patch("reconcile.terraform_users.queries")
mocked_queries.get_aws_accounts.return_value = [test_aws_account]
mocked_queries.get_app_interface_settings.return_value = None
mocked_ts = mocker.patch("reconcile.terraform_users.Terrascript", autospec=True)
mocked_aws = mocker.patch("reconcile.terraform_users.AWSApi", autospec=True)
thread_pool_size = 1
accounts, working_dirs, setup_err, aws_api = integ.setup(
False, thread_pool_size, []
)
assert accounts == [test_aws_account]
assert working_dirs == mocked_ts.return_value.dump.return_value
assert setup_err == mocked_ts.return_value.populate_users.return_value
assert aws_api == mocked_aws.return_value
mocked_ts.assert_called_once_with(
integ.QONTRACT_INTEGRATION,
integ.QONTRACT_TF_PREFIX,
thread_pool_size,
[test_aws_account],
settings=None,
)
mocked_ts.return_value.populate_users.assert_called_once_with(
[test_aws_account_role],
[],
appsre_pgp_key=None,
)
mocked_aws.assert_called_once_with(
1,
[test_aws_account],
settings=None,
init_users=False,
)
def test_empty_run(
mocker: MockerFixture,
pgp_reencryption_settings: PgpReencryptionSettingsQueryData,
test_aws_account: dict,
gql_api_builder: Callable[..., GqlApi],
) -> None:
mocked_gql_api = gql_api_builder({"roles": []})
mocker.patch("reconcile.terraform_users.gql").get_api.return_value = mocked_gql_api
mocker.patch(
"reconcile.terraform_users.query"
).return_value = pgp_reencryption_settings
mocker.patch("reconcile.terraform_users.sys")
mocked_queries = mocker.patch("reconcile.terraform_users.queries")
mocked_queries.get_aws_accounts.return_value = [test_aws_account]
mocked_queries.get_app_interface_settings.return_value = None
mocker.patch("reconcile.terraform_users.Terrascript", autospec=True)
mocker.patch("reconcile.terraform_users.AWSApi", autospec=True)
mocked_logging = mocker.patch("reconcile.terraform_users.logging")
integ.run(False, send_mails=False)
mocked_logging.warning.assert_called_once_with(
"No participating AWS accounts found, consider disabling this integration, account name: None"
) |
32 | get new attachments | from django.utils.translation import npgettext, pgettext
from rest_framework import serializers
from rest_framework.fields import empty
from . import PostingEndpoint, PostingMiddleware
from ....acl.objectacl import add_acl_to_obj
from ...serializers import AttachmentSerializer
class AttachmentsMiddleware(PostingMiddleware):
def use_this_middleware(self):
return bool(self.user_acl["max_attachment_size"])
def get_serializer(self):
return AttachmentsSerializer(
data=self.request.data,
context={
"mode": self.mode,
"user": self.user,
"user_acl": self.user_acl,
"post": self.post,
"settings": self.settings,
},
)
def save(self, serializer):
serializer.save()
class AttachmentsSerializer(serializers.Serializer):
attachments = serializers.ListField(
child=serializers.IntegerField(), required=False
)
def __init__(self, *args, **kwargs):
self.update_attachments = False
self.removed_attachments = []
self.final_attachments = []
super().__init__(*args, **kwargs)
def validate_attachments(self, ids):
ids = list(set(ids))
validate_attachments_count(ids, self.context["settings"])
attachments = self.get_initial_attachments()
new_attachments = self.METHOD_NAME(ids)
if not attachments and not new_attachments:
return [] # no attachments
# clean existing attachments
for attachment in attachments:
if attachment.pk in ids:
self.final_attachments.append(attachment)
else:
if attachment.acl["can_delete"]:
self.update_attachments = True
self.removed_attachments.append(attachment)
else:
message = pgettext(
"posting api",
'You don\'t have permission to remove "%(attachment)s" attachment.',
)
raise serializers.ValidationError(
message % {"attachment": attachment.filename}
)
if new_attachments:
self.update_attachments = True
self.final_attachments += new_attachments
self.final_attachments.sort(key=lambda a: a.pk, reverse=True)
def get_initial_attachments(self):
attachments = []
if self.context["mode"] == PostingEndpoint.EDIT:
queryset = self.context["post"].attachment_set.select_related("filetype")
attachments = list(queryset)
add_acl_to_obj(self.context["user_acl"], attachments)
return attachments
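    # Fetch attachments this user has uploaded that are not yet attached to any post, limited to the submitted ids.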
def METHOD_NAME(self, ids):
if not ids:
return []
queryset = (
self.context["user"]
.attachment_set.select_related("filetype")
.filter(post__isnull=True, id__in=ids)
)
return list(queryset)
def save(self):
if not self.update_attachments:
return
if self.removed_attachments:
for attachment in self.removed_attachments:
attachment.delete_files()
self.context["post"].attachment_set.filter(
id__in=[a.id for a in self.removed_attachments]
).delete()
if self.final_attachments:
# sort final attachments by id, descending
self.final_attachments.sort(key=lambda a: a.pk, reverse=True)
self.context["user"].attachment_set.filter(
id__in=[a.id for a in self.final_attachments]
).update(post=self.context["post"])
self.sync_attachments_cache(self.context["post"], self.final_attachments)
def sync_attachments_cache(self, post, attachments):
if attachments:
post.attachments_cache = AttachmentSerializer(attachments, many=True).data
for attachment in post.attachments_cache:
del attachment["acl"]
del attachment["post"]
else:
post.attachments_cache = None
post.update_fields.append("attachments_cache")
def validate_attachments_count(data, settings):
total_attachments = len(data)
if total_attachments > settings.post_attachments_limit:
# pylint: disable=line-too-long
message = npgettext(
"posting api",
"You can't attach more than %(limit_value)s file to single post (added %(show_value)s).",
"You can't attach more than %(limit_value)s flies to single post (added %(show_value)s).",
settings.post_attachments_limit,
)
raise serializers.ValidationError(
message
% {
"limit_value": settings.post_attachments_limit,
"show_value": total_attachments,
}
) |
33 | tag names iter | """Abstract representation of a Potential object."""
from abc import abstractmethod
from typing import Any, Dict, Iterator, List
from gmso.abc.gmso_base import GMSOBase
from gmso.utils.expression import PotentialExpression
try:
from pydantic.v1 import Field, validator
except ImportError:
from pydantic import Field, validator
class AbstractPotential(GMSOBase):
__base_doc__ = """An abstract potential class.
AbstractPotential stores a general interaction between components of a chemical
topology that can be specified by a mathematical expression. The functional
form of the potential is stored as a `sympy` expression and the parameters
are stored explicitly. This class is agnostic to the instantiation of the
potential, which can be e.g. a non-bonded potential, a bonded potential, an
angle potential, a dihedral potential, etc. and is designed to be inherited
by classes that represent these potentials.
"""
name_: str = Field(
"", description="The name of the potential. Defaults to class name"
)
potential_expression_: PotentialExpression = Field(
PotentialExpression(expression="a*x+b", independent_variables={"x"}),
description="The mathematical expression for the potential",
)
tags_: Dict[str, Any] = Field(
{}, description="Tags associated with the potential"
)
def __init__(
self,
name="Potential",
expression="a*x+b",
independent_variables=None,
potential_expression=None,
**kwargs,
):
if potential_expression is None:
if expression is None:
expression = "a*x+b"
if independent_variables is None:
independent_variables = {"x"}
potential_expression = PotentialExpression(
expression=expression,
independent_variables=independent_variables,
parameters=None,
)
if not kwargs.get("tags"):
kwargs["tags"] = {}
super().__init__(
name=name, potential_expression=potential_expression, **kwargs
)
@property
def name(self):
"""The name of the potential."""
return self.__dict__.get("name_")
@property
def independent_variables(self):
"""Optional[Union[set, str]]\n\tThe independent variables in the `Potential`'s expression."""
return self.potential_expression_.independent_variables
@property
def expression(self):
"""Optional[Union[str, sympy.Expr]]\n\tThe mathematical expression of the functional form of the potential."""
return self.potential_expression_.expression
@property
def potential_expression(self):
"""Return the functional form of the potential."""
return self.__dict__.get("potential_expression_")
@property
def tags(self):
return self.__dict__.get("tags_")
@property
def tag_names(self) -> List[str]:
return list(self.__dict__.get("tags_"))
@property
def METHOD_NAME(self) -> Iterator[str]:
return iter(self.__dict__.get("tags_"))
def add_tag(self, tag: str, value: Any, overwrite=True) -> None:
"""Add metadata for a particular tag"""
if self.tags.get(tag) and not overwrite:
raise ValueError(
f"Tag {tag} already exists. "
f"Please use overwrite=True to overwrite"
)
self.tags[tag] = value
def get_tag(self, tag: str, throw=False) -> Any:
"""Get value of a particular tag"""
if throw:
return self.tags[tag]
else:
return self.tags.get(tag)
def delete_tag(self, tag: str) -> None:
del self.tags[tag]
def pop_tag(self, tag: str) -> Any:
return self.tags.pop(tag, None)
@validator("potential_expression_", pre=True)
def validate_potential_expression(cls, v):
if isinstance(v, dict):
v = PotentialExpression(**v)
return v
@abstractmethod
def set_expression(self):
"""Set the functional form of the expression."""
raise NotImplementedError
def __setattr__(self, key: Any, value: Any) -> None:
"""Set attributes of the potential."""
if key == "expression":
self.potential_expression_.expression = value
elif key == "independent_variables":
self.potential_expression_.independent_variables = value
elif key == "set_ref_":
return
else:
super().__setattr__(key, value)
def __repr__(self):
"""Return a formatted representation of the potential."""
desc = (
f"<{self.__class__.__name__} {self.name},\n "
f"expression: {self.expression},\n "
f"id: {id(self)}>"
)
return desc
def __str__(self):
"""Return a string representation of the potential."""
return (
f"<{self.__class__.__name__} {self.name}, "
f"expression: {self.expression}, "
f"id: {id(self)}>"
)
class Config:
"""Pydantic configuration for the potential objects."""
fields = {
"name_": "name",
"potential_expression_": "potential_expression",
"tags_": "tags",
}
alias_to_fields = {
"name": "name_",
"potential_expression": "potential_expression_",
"tags": "tags_",
} |
34 | test address update mutation | from unittest.mock import patch
import graphene
from freezegun import freeze_time
from ......account.search import generate_address_search_document_value
from ......webhook.event_types import WebhookEventAsyncType
from .....tests.utils import assert_no_permission, get_graphql_content
from ..utils import generate_address_webhook_call_args
ADDRESS_UPDATE_MUTATION = """
mutation updateUserAddress($addressId: ID!, $address: AddressInput!) {
addressUpdate(id: $addressId, input: $address) {
address {
city
metadata {
key
value
}
}
user {
id
}
}
}
"""
def METHOD_NAME(
staff_api_client, customer_user, permission_manage_users, graphql_address_data
):
query = ADDRESS_UPDATE_MUTATION
address_obj = customer_user.addresses.first()
assert staff_api_client.user not in address_obj.user_addresses.all()
variables = {
"addressId": graphene.Node.to_global_id("Address", address_obj.id),
"address": graphql_address_data,
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_users]
)
content = get_graphql_content(response)
data = content["data"]["addressUpdate"]
assert data["address"]["metadata"] == [{"key": "public", "value": "public_value"}]
assert data["address"]["city"] == graphql_address_data["city"].upper()
address_obj.refresh_from_db()
assert address_obj.city == graphql_address_data["city"].upper()
customer_user.refresh_from_db()
assert (
generate_address_search_document_value(address_obj)
in customer_user.search_document
)
@freeze_time("2022-05-12 12:00:00")
@patch("saleor.plugins.webhook.plugin.get_webhooks_for_event")
@patch("saleor.plugins.webhook.plugin.trigger_webhooks_async")
def test_address_update_mutation_trigger_webhook(
mocked_webhook_trigger,
mocked_get_webhooks_for_event,
any_webhook,
staff_api_client,
customer_user,
permission_manage_users,
graphql_address_data,
settings,
):
# given
mocked_get_webhooks_for_event.return_value = [any_webhook]
settings.PLUGINS = ["saleor.plugins.webhook.plugin.WebhookPlugin"]
address = customer_user.addresses.first()
assert staff_api_client.user not in address.user_addresses.all()
variables = {
"addressId": graphene.Node.to_global_id("Address", address.id),
"address": graphql_address_data,
}
# when
response = staff_api_client.post_graphql(
ADDRESS_UPDATE_MUTATION, variables, permissions=[permission_manage_users]
)
content = get_graphql_content(response)
address.refresh_from_db()
# then
assert content["data"]["addressUpdate"]
mocked_webhook_trigger.assert_called_with(
*generate_address_webhook_call_args(
address,
WebhookEventAsyncType.ADDRESS_UPDATED,
staff_api_client.user,
any_webhook,
)
)
@patch("saleor.graphql.account.mutations.base.prepare_user_search_document_value")
def test_address_update_mutation_no_user_assigned(
prepare_user_search_document_value_mock,
staff_api_client,
address,
permission_manage_users,
graphql_address_data,
):
# given
query = ADDRESS_UPDATE_MUTATION
variables = {
"addressId": graphene.Node.to_global_id("Address", address.id),
"address": graphql_address_data,
}
# when
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_users]
)
# then
content = get_graphql_content(response)
data = content["data"]["addressUpdate"]
assert data["address"]["city"] == graphql_address_data["city"].upper()
prepare_user_search_document_value_mock.assert_not_called()
def test_customer_update_address_for_other(
user_api_client, customer_user, address_other_country, graphql_address_data
):
address_obj = address_other_country
assert customer_user not in address_obj.user_addresses.all()
address_data = graphql_address_data
variables = {
"addressId": graphene.Node.to_global_id("Address", address_obj.id),
"address": address_data,
}
response = user_api_client.post_graphql(ADDRESS_UPDATE_MUTATION, variables)
assert_no_permission(response) |
35 | application instance | import json
import time
from urllib.parse import urlencode
import oauthlib.common
import oauthlib.oauth1
import pytest
from h_matchers import Any
from lms.models import Assignment
from lms.resources._js_config import JSConfig
from tests import factories
class TestBasicLTILaunch:
def test_requests_with_no_oauth_signature_are_forbidden(
self, lti_params, do_lti_launch
):
del lti_params["oauth_signature"]
response = do_lti_launch(post_params=lti_params, status=403)
assert response.headers["Content-Type"] == Any.string.matching("^text/html")
assert response.html
def test_unconfigured_basic_lti_launch(self, lti_params, do_lti_launch):
response = do_lti_launch(
post_params=lti_params,
status=200,
)
assert self.get_client_config(response)["mode"] == JSConfig.Mode.FILE_PICKER
def test_db_configured_basic_lti_launch(
self, lti_params, assignment, do_lti_launch
):
response = do_lti_launch(post_params=lti_params, status=200)
js_config = self.get_client_config(response)
assert js_config["mode"] == JSConfig.Mode.BASIC_LTI_LAUNCH
assert urlencode({"url": assignment.document_url}) in js_config["viaUrl"]
def test_basic_lti_launch_canvas_deep_linking_url(
self, do_lti_launch, url_launch_params, db_session
):
get_params, post_params = url_launch_params
response = do_lti_launch(
get_params=get_params, post_params=post_params, status=200
)
js_config = self.get_client_config(response)
assert js_config["mode"] == JSConfig.Mode.BASIC_LTI_LAUNCH
assert (
urlencode({"url": "https://url-configured.com/document.pdf"})
in js_config["viaUrl"]
)
assert (
db_session.query(Assignment)
.filter_by(document_url="https://url-configured.com/document.pdf")
.count()
== 1
)
def test_basic_lti_launch_canvas_deep_linking_canvas_file(
self, do_lti_launch, db_session, canvas_file_launch_params
):
get_params, post_params = canvas_file_launch_params
response = do_lti_launch(
get_params=get_params, post_params=post_params, status=200
)
js_config = self.get_client_config(response)
assert js_config["mode"] == JSConfig.Mode.BASIC_LTI_LAUNCH
assert (
js_config["api"]["viaUrl"]["path"]
== "/api/canvas/assignments/rli-1234/via_url"
)
assert (
db_session.query(Assignment)
.filter_by(document_url="canvas://file/course/1/file_id/2")
.count()
== 1
)
@pytest.fixture(autouse=True)
def METHOD_NAME(self, db_session): # pylint:disable=unused-argument
return factories.ApplicationInstance(
tool_consumer_instance_guid="IMS Testing",
organization=factories.Organization(),
)
@pytest.fixture
def assignment(self, db_session, METHOD_NAME, lti_params):
assignment = Assignment(
resource_link_id=lti_params["resource_link_id"],
tool_consumer_instance_guid=METHOD_NAME.tool_consumer_instance_guid,
document_url="http://db-configured.com/document.pdf",
)
db_session.add(assignment)
db_session.commit()
return assignment
@pytest.fixture
def oauth_client(self, METHOD_NAME):
return oauthlib.oauth1.Client(
METHOD_NAME.consumer_key, METHOD_NAME.shared_secret
)
@pytest.fixture
def lti_params(self, METHOD_NAME, sign_lti_params):
params = {
"context_id": "con-182",
"context_label": "SI182",
"context_title": "Design of Personal Environments",
"context_type": "CourseSection",
"custom_context_memberships_url": "https://apps.imsglobal.org/lti/cert/tp/tp_membership.php/context/con-182/membership?b64=a2puNjk3b3E5YTQ3Z28wZDRnbW5xYzZyYjU%3D",
"custom_context_setting_url": "https://apps.imsglobal.org/lti/cert/tp/tp_settings.php/lis/CourseSection/con-182/bindings/ims/cert/custom?b64=a2puNjk3b3E5YTQ3Z28wZDRnbW5xYzZyYjU%3D",
"custom_link_setting_url": "$LtiLink.custom.url",
"custom_system_setting_url": "https://apps.imsglobal.org/lti/cert/tp/tp_settings.php/ToolProxy/Hypothesis1b40eafba184a131307049e01e9c147d/custom?b64=a2puNjk3b3E5YTQ3Z28wZDRnbW5xYzZyYjU%3D",
"custom_tc_profile_url": "https://apps.imsglobal.org/lti/cert/tp/tp_tcprofile.php?b64=a2puNjk3b3E5YTQ3Z28wZDRnbW5xYzZyYjU%3D",
"launch_presentation_document_target": "iframe",
"launch_presentation_locale": "en_US",
"launch_presentation_return_url": "https://apps.imsglobal.org/lti/cert/tp/tp_return.php/basic-lti-launch-request",
"lis_course_section_sourcedid": "id-182",
"lis_person_contact_email_primary": "jane@school.edu",
"lis_person_name_family": "Lastname",
"lis_person_name_full": "Jane Q. Lastname",
"lis_person_name_given": "Jane",
"lis_person_sourcedid": "school.edu:jane",
"lti_message_type": "basic-lti-launch-request",
"lti_version": "LTI-1p0",
"oauth_callback": "about:blank",
"oauth_consumer_key": METHOD_NAME.consumer_key,
"oauth_nonce": "38d6db30e395417659d068164ca95169",
"oauth_signature_method": "HMAC-SHA1",
"oauth_timestamp": str(int(time.time())),
"oauth_version": "1.0",
"resource_link_id": "rli-1234",
"resource_link_title": "Link 1234",
"resourcelinkid": "rli-1234",
"roles": "Instructor",
"tool_consumer_info_product_family_code": "imsglc",
"tool_consumer_info_version": "1.1",
"tool_consumer_instance_description": "IMS Testing Description",
"tool_consumer_instance_guid": METHOD_NAME.tool_consumer_instance_guid,
"tool_consumer_instance_name": "IMS Testing Instance",
"user_id": "123456",
}
return sign_lti_params(params)
@pytest.fixture
def canvas_file_launch_params(self, lti_params, sign_lti_params):
return {"canvas_file": "true", "file_id": "2"}, sign_lti_params(
dict(
lti_params,
custom_canvas_course_id="1",
tool_consumer_info_product_family_code="canvas",
)
)
@pytest.fixture
def url_launch_params(self, lti_params, sign_lti_params):
return {}, sign_lti_params(
dict(
lti_params,
url="https://url-configured.com/document.pdf",
tool_consumer_info_product_family_code="canvas",
)
)
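    # OAuth1-signs launch params with the ApplicationInstance's credentials, mimicking what an LMS does before POSTing a launch.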
@pytest.fixture
def sign_lti_params(self, oauth_client):
def _sign(params):
params["oauth_signature"] = oauth_client.get_oauth_signature(
oauthlib.common.Request(
"http://localhost/lti_launches", "POST", body=params
)
)
return params
return _sign
def get_client_config(self, response):
return json.loads(response.html.find("script", {"class": "js-config"}).string) |
36 | fit | import argparse
import logging
import os
import sys
import tensorflow as tf
from datasets import load_dataset
from tqdm import tqdm
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification
from transformers.file_utils import is_sagemaker_dp_enabled
if os.environ.get("SDP_ENABLED") or is_sagemaker_dp_enabled():
SDP_ENABLED = True
os.environ["SAGEMAKER_INSTANCE_TYPE"] = "p3dn.24xlarge"
import smdistributed.dataparallel.tensorflow as sdp
else:
SDP_ENABLED = False
def METHOD_NAME(model, loss, opt, train_dataset, epochs, train_batch_size, max_steps=None):
pbar = tqdm(train_dataset)
for i, batch in enumerate(pbar):
with tf.GradientTape() as tape:
inputs, targets = batch
outputs = model(batch)
loss_value = loss(targets, outputs.logits)
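        # With SMDataParallel enabled, wrap the tape so gradients are all-reduced across workers when tape.gradient() is called.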
if SDP_ENABLED:
tape = sdp.DistributedGradientTape(tape, sparse_as_dense=True)
grads = tape.gradient(loss_value, model.trainable_variables)
opt.apply_gradients(zip(grads, model.trainable_variables))
pbar.set_description(f"Loss: {loss_value:.4f}")
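        # After the first step, broadcast model and optimizer state from rank 0 so every worker starts from identical values.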
if SDP_ENABLED:
if i == 0:
sdp.broadcast_variables(model.variables, root_rank=0)
sdp.broadcast_variables(opt.variables(), root_rank=0)
first_batch = False
if max_steps and i >= max_steps:
break
train_results = {"loss": loss_value.numpy()}
return train_results
def get_datasets():
# Load dataset
train_dataset, test_dataset = load_dataset("imdb", split=["train", "test"])
# Preprocess train dataset
train_dataset = train_dataset.map(
lambda e: tokenizer(e["text"], truncation=True, padding="max_length"), batched=True
)
train_dataset.set_format(type="tensorflow", columns=["input_ids", "attention_mask", "label"])
train_features = {x: train_dataset[x] for x in ["input_ids", "attention_mask"]}
tf_train_dataset = tf.data.Dataset.from_tensor_slices((train_features, train_dataset["label"]))
# Preprocess test dataset
test_dataset = test_dataset.map(
lambda e: tokenizer(e["text"], truncation=True, padding="max_length"), batched=True
)
test_dataset.set_format(type="tensorflow", columns=["input_ids", "attention_mask", "label"])
test_features = {x: test_dataset[x] for x in ["input_ids", "attention_mask"]}
tf_test_dataset = tf.data.Dataset.from_tensor_slices((test_features, test_dataset["label"]))
if SDP_ENABLED:
tf_train_dataset = tf_train_dataset.shard(sdp.size(), sdp.rank())
tf_test_dataset = tf_test_dataset.shard(sdp.size(), sdp.rank())
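    # With SDP enabled each worker sees a disjoint shard of the data, so the batch sizes below are per worker.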
tf_train_dataset = tf_train_dataset.batch(args.train_batch_size, drop_remainder=True)
tf_test_dataset = tf_test_dataset.batch(args.eval_batch_size, drop_remainder=True)
return tf_train_dataset, tf_test_dataset
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Hyperparameters sent by the client are passed as command-line arguments to the script.
parser.add_argument("--epochs", type=int, default=1)
parser.add_argument("--train_batch_size", type=int, default=16)
parser.add_argument("--eval_batch_size", type=int, default=8)
parser.add_argument("--model_name", type=str)
parser.add_argument("--learning_rate", type=str, default=5e-5)
parser.add_argument("--do_train", type=bool, default=True)
parser.add_argument("--do_eval", type=bool, default=True)
parser.add_argument("--max_steps", type=int)
# Data, model, and output directories
parser.add_argument("--output_data_dir", type=str, default=os.environ["SM_OUTPUT_DATA_DIR"])
parser.add_argument("--model_dir", type=str, default=os.environ["SM_MODEL_DIR"])
parser.add_argument("--n_gpus", type=str, default=os.environ["SM_NUM_GPUS"])
args, _ = parser.parse_known_args()
# Set up logging
logger = logging.getLogger(__name__)
logging.basicConfig(
level=logging.getLevelName("INFO"),
handlers=[logging.StreamHandler(sys.stdout)],
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)
learning_rate = args.learning_rate
if SDP_ENABLED:
sdp.init()
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
if gpus:
tf.config.experimental.set_visible_devices(gpus[sdp.local_rank()], "GPU")
learning_rate = learning_rate * sdp.size()
# Load model and tokenizer
model = TFAutoModelForSequenceClassification.from_pretrained(args.model_name)
tokenizer = AutoTokenizer.from_pretrained(args.model_name)
# get datasets
tf_train_dataset, tf_test_dataset = get_datasets()
    # define optimizer and loss
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
metrics = [tf.keras.metrics.SparseCategoricalAccuracy()]
model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
# Training
if args.do_train:
# train_results = model.fit(tf_train_dataset, epochs=args.epochs, batch_size=args.train_batch_size)
train_results = METHOD_NAME(
model,
loss,
optimizer,
tf_train_dataset,
args.epochs,
args.train_batch_size,
max_steps=args.max_steps,
)
logger.info("*** Train ***")
output_eval_file = os.path.join(args.output_data_dir, "train_results.txt")
if not SDP_ENABLED or sdp.rank() == 0:
with open(output_eval_file, "w") as writer:
logger.info("***** Train results *****")
logger.info(train_results)
for key, value in train_results.items():
logger.info(" %s = %s", key, value)
writer.write("%s = %s\n" % (key, value))
# Evaluation
if args.do_eval and (not SDP_ENABLED or sdp.rank() == 0):
result = model.evaluate(tf_test_dataset, batch_size=args.eval_batch_size, return_dict=True)
logger.info("*** Evaluate ***")
output_eval_file = os.path.join(args.output_data_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results *****")
logger.info(result)
for key, value in result.items():
logger.info(" %s = %s", key, value)
writer.write("%s = %s\n" % (key, value))
# Save result
if SDP_ENABLED:
if sdp.rank() == 0:
model.save_pretrained(args.model_dir)
tokenizer.save_pretrained(args.model_dir)
else:
model.save_pretrained(args.model_dir)
tokenizer.save_pretrained(args.model_dir) |
37 | dd | # Copyright 2023, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Python tests originally created or extracted from other peoples work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Playing around with constants only. """
# pylint: disable=invalid-name,redefined-outer-name
from __future__ import print_function
try:
long
except NameError:
long = int
def displayDict(d):
result = "{"
first = True
for key, value in sorted(d.items()):
if not first:
result += ","
result += "%s: %s" % (repr(key), repr(value))
first = False
result += "}"
return result
print("A bunch of constants and their representation:")
for value in (0, 3, -4, 17, "hey", (0,), 0.0, -0.0):
print(value, ":", repr(value))
print("Comparing constants, optimizable:")
print(1 == 0)
print("Representation of long constants:")
a = long(0)
print(repr(long(0)), repr(a) == "0L")
print("Identity of empty dictionary constants:")
print({} is {})
a = ({}, [])
a[0][1] = 2
a[1].append(3)
print("Mutable list and dict inside an immutable tuple:")
print(a)
print("Empty list and dict are hopefully unchanged:")
print(({}, []))
def argChanger(a):
a[0][1] = 2
a[1].append(3)
return a
print("Mutable list and dict inside an immutable tuple as arguments:")
print(argChanger(({}, [])))
print("Empty list and dict are hopefully still unchanged:")
print(({}, []))
print("Set constants:")
print(set(["foo"]))
def mutableConstantChanger():
a = ([1, 2], [3])
print("Start out with value:")
print(a)
a[1].append(5)
print("Changed to value:")
print(a)
d = {"l": [], "m": []}
print("Start out with value:")
print(d)
d["l"].append(7)
print("Changed to value:")
print(d)
spec = dict(qual=[], storage=set(), type=[], function=set(), q=1)
spec["type"].insert(0, 2)
spec["storage"].add(3)
print("Dictionary created from dict built-in.")
print(sorted(spec))
mutableConstantChanger()
print("Redo constant changes, to catch corruptions:")
mutableConstantChanger()
def defaultKeepsIdentity(arg="str_value"):
print("Default constant values are still shared if immutable:", arg is "str_value")
defaultKeepsIdentity()
# Dictionary creation from call arguments.
def METHOD_NAME(**d):
return d
def f():
def one():
print("one")
def two():
print("two")
a = METHOD_NAME(qual=one(), storage=two(), type=[], function=[])
print("f mutable", displayDict(a))
a = METHOD_NAME(qual=1, storage=2, type=3, function=4)
print("f immutable", displayDict(a))
x = {"p": 7}
a = METHOD_NAME(qual=[], storage=[], type=[], function=[], **x)
print("f ext mutable", displayDict(a))
x = {"p": 8}
a = METHOD_NAME(qual=1, storage=2, type=3, function=4, **x)
print("f ext immutable", displayDict(a))
f()
# Dictionary creation one after another
x = {}
x["function"] = []
x["type"] = []
x["storage"] = []
x["qual"] = []
print("Manual built dictionary:", x)
x = {}
x["function"] = 1
x["type"] = 2
x["storage"] = 3
x["qual"] = 4
print("Manual built dictionary:", x)
# Constants in the code must be created differently.
d = {"qual": [], "storage": [], "type2": [], "function": []}
print("Mutable values dictionary constant:", displayDict(d))
d = {"qual": 1, "storage": 2, "type2": 3, "function": 4}
print("Immutable values dictionary constant:", displayDict(d))
# Constants that might be difficult
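# -(2**63) and -(2**31): the smallest 64-bit and 32-bit signed integer values, respectively.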
min_signed_int = int(-(2 ** (8 * 8 - 1) - 1) - 1)
print("Small int:", min_signed_int, type(min_signed_int))
min_signed_int = int(-(2 ** (8 * 4 - 1) - 1) - 1)
print("Small int", min_signed_int, type(min_signed_int))
# Constants that might be difficult
min_signed_long = long(-(2 ** (8 * 8 - 1) - 1) - 1)
print("Small long", min_signed_long, type(min_signed_long))
min_signed_long = long(-(2 ** (8 * 4 - 1) - 1) - 1)
print("Small long", min_signed_long, type(min_signed_long))
try:
type_prepare = type.__prepare__
except AttributeError:
print("Python2 has no type.__prepare__")
else:
print("Type prepare", type_prepare) |
38 | test | ##########################################################################
#
# Copyright (c) 2020, Cinesite VFX Ltd. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import Gaffer
import GafferTest
import GafferUI
import GafferUITest
class BoolPlugValueWidgetTest( GafferUITest.TestCase ) :
def METHOD_NAME( self ) :
n = Gaffer.Node()
n["user"]["p1"] = Gaffer.BoolPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
n["user"]["p2"] = Gaffer.BoolPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
w = GafferUI.BoolPlugValueWidget( n["user"]["p1"] )
self.assertEqual( w.getPlug(), n["user"]["p1"] )
self.assertEqual( w.getPlugs(), { n["user"]["p1"] } )
self.assertEqual( w.boolWidget().getState(), False )
n["user"]["p1"].setValue( True )
GafferUITest.PlugValueWidgetTest.waitForUpdate( w )
self.assertEqual( w.boolWidget().getState(), True )
w.setPlugs( n["user"].children() )
GafferUITest.PlugValueWidgetTest.waitForUpdate( w )
self.assertEqual( w.boolWidget().getState(), w.boolWidget().State.Indeterminate )
n["user"]["p2"].setValue( True )
GafferUITest.PlugValueWidgetTest.waitForUpdate( w )
self.assertEqual( w.boolWidget().getState(), True )
w.setPlugs( [] )
GafferUITest.PlugValueWidgetTest.waitForUpdate( w )
self.assertEqual( w.boolWidget().getState(), w.boolWidget().State.Indeterminate )
def testInitialValue( self ) :
n = Gaffer.Node()
n["user"]["p"] = Gaffer.BoolPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
for v in ( True, False ) :
n["user"]["p"].setValue( v )
w = GafferUI.BoolPlugValueWidget( n["user"]["p"] )
GafferUITest.PlugValueWidgetTest.waitForUpdate( w )
self.assertEqual( w.boolWidget().getState(), v )
def testErrorHandling( self ) :
script = Gaffer.ScriptNode()
script["n"] = Gaffer.Node()
script["n"]["user"]["p"] = Gaffer.BoolPlug( flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
w = GafferUI.BoolPlugValueWidget( script["n"]["user"]["p"] )
self.assertFalse( w.boolWidget().getErrored() )
script["b"] = GafferTest.BadNode()
script["n"]["user"]["p"].setInput( script["b"]["out3"] )
GafferUITest.PlugValueWidgetTest.waitForUpdate( w )
self.assertTrue( w.boolWidget().getErrored() )
script["n"]["user"]["p"].setInput( None )
GafferUITest.PlugValueWidgetTest.waitForUpdate( w )
self.assertFalse( w.boolWidget().getErrored() )
if __name__ == "__main__":
unittest.main() |
39 | test repr | import pytest
from qcodes.parameters import Parameter
from qcodes.parameters.sweep_values import SweepValues
from qcodes.validators import Numbers
@pytest.fixture(name='c0')
def _make_c0():
c0 = Parameter('c0', vals=Numbers(-10, 10), get_cmd=None, set_cmd=None)
yield c0
@pytest.fixture(name='c1')
def _make_c1():
c1 = Parameter('c1', get_cmd=None, set_cmd=None)
yield c1
@pytest.fixture(name='c2')
def _make_c2():
c2 = Parameter('c2', get_cmd=lambda: 42)
yield c2
def test_errors(c0, c1, c2) -> None:
# only complete 3-part slices are valid
with pytest.raises(TypeError):
c0[1:2] # For Int params this could be defined as step=1
with pytest.raises(TypeError):
c0[:2:3]
with pytest.raises(TypeError):
c0[1::3]
with pytest.raises(TypeError):
c0[:] # For Enum params we *could* define this one too...
# fails if the parameter has no setter
with pytest.raises(TypeError):
c2[0:0.1:0.01] # type: ignore[misc]
# validates every step value against the parameter's Validator
with pytest.raises(ValueError):
c0[5:15:1]
with pytest.raises(ValueError):
c0[5.0:15.0:1.0] # type: ignore[misc]
with pytest.raises(ValueError):
c0[-12]
with pytest.raises(ValueError):
c0[-5, 12, 5]
with pytest.raises(ValueError):
c0[-5, 12:8:1, 5]
# cannot combine SweepValues for different parameters
with pytest.raises(TypeError):
_ = c0[0.1] + c1[0.2]
# improper use of extend
with pytest.raises(TypeError):
c0[0.1].extend(5)
# SweepValue object has no getter, even if the parameter does
with pytest.raises(AttributeError):
c0[0.1].get
def test_valid(c0) -> None:
c0_sv = c0[1]
# setter gets mapped
assert c0_sv.set == c0.set
# normal sequence operations access values
assert list(c0_sv) == [1]
assert c0_sv[0] == 1
assert 1 in c0_sv
assert 2 not in c0_sv
# in-place and copying addition
c0_sv += c0[1.5:1.8:0.1] # type: ignore[misc]
c0_sv2 = c0_sv + c0[2]
assert list(c0_sv) == [1, 1.5, 1.6, 1.7]
assert list(c0_sv2) == [1, 1.5, 1.6, 1.7, 2]
# append and extend
c0_sv3 = c0[2]
# append only works with straight values
c0_sv3.append(2.1)
# extend can use another SweepValue, (even if it only has one value)
c0_sv3.extend(c0[2.2])
# extend can also take a sequence
c0_sv3.extend([2.3])
# as can addition
c0_sv3 += [2.4]
c0_sv4 = c0_sv3 + [2.5, 2.6]
assert list(c0_sv3) == [2, 2.1, 2.2, 2.3, 2.4]
assert list(c0_sv4) == [2, 2.1, 2.2, 2.3, 2.4, 2.5, 2.6]
# len
assert len(c0_sv3) == 5
# in-place and copying reverse
c0_sv.reverse()
c0_sv5 = reversed(c0_sv)
assert list(c0_sv) == [1.7, 1.6, 1.5, 1]
assert list(c0_sv5) == [1, 1.5, 1.6, 1.7]
# multi-key init, where first key is itself a list
c0_sv6 = c0[[1, 3], 4]
# copying
c0_sv7 = c0_sv6.copy()
assert list(c0_sv6) == [1, 3, 4]
assert list(c0_sv7) == [1, 3, 4]
assert c0_sv6 is not c0_sv7
def test_base() -> None:
p = Parameter('p', get_cmd=None, set_cmd=None)
with pytest.raises(NotImplementedError):
iter(SweepValues(p))
def test_snapshot(c0) -> None:
assert c0[0].snapshot() == {"parameter": c0.snapshot(), "values": [{"item": 0}]}
assert c0[0:5:0.3].snapshot()["values"] == [ # type: ignore[misc]
{"first": 0, "last": 4.8, "num": 17, "type": "linear"}
]
sv = c0.sweep(start=2, stop=4, num=5)
assert sv.snapshot()['values'] == [{
'first': 2,
'last': 4,
'num': 5,
'type': 'linear'
}]
# mixture of bare items, nested lists, and slices
sv = c0[1, 7, 3.2, [1, 2, 3], 6:9:1, -4.5, 5.3]
assert sv.snapshot()['values'] == [{
'first': 1,
'last': 5.3,
'min': -4.5,
'max': 8,
'num': 11,
'type': 'sequence'
}]
assert (c0[0] + c0[1]).snapshot()['values'] == [
{'item': 0},
{'item': 1}
]
assert (c0[0:3:1] + c0[4, 6, 9]).snapshot()['values'] == [
{'first': 0, 'last': 2, 'num': 3, 'type': 'linear'},
{'first': 4, 'last': 9, 'min': 4, 'max': 9, 'num': 3,
'type': 'sequence'}
]
def METHOD_NAME(c0) -> None:
sv = c0[0]
assert repr(sv) == (
f"<qcodes.parameters.sweep_values.SweepFixedValues: c0 at {id(sv)}>"
) |
40 | make layer | # Copyright (c) OpenMMLab. All rights reserved.
import copy
from mmcv.cnn import ConvModule
from torch.nn.modules.batchnorm import _BatchNorm
from mmpose.registry import MODELS
from .base_backbone import BaseBackbone
from .utils import InvertedResidual
@MODELS.register_module()
class MobileNetV3(BaseBackbone):
"""MobileNetV3 backbone.
Args:
        arch (str): Architecture of mobilenetv3, from {small, big}.
Default: small.
conv_cfg (dict): Config dict for convolution layer.
Default: None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
out_indices (None or Sequence[int]): Output from which stages.
Default: (-1, ), which means output tensors from final stage.
frozen_stages (int): Stages to be frozen (all param fixed).
Default: -1, which means not freezing any parameters.
norm_eval (bool): Whether to set norm layers to eval mode, namely,
freeze running stats (mean and var). Note: Effect on Batch Norm
and its variants only. Default: False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save
some memory while slowing down the training speed.
Default: False.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default:
``[
dict(type='Kaiming', layer=['Conv2d']),
dict(
type='Constant',
val=1,
layer=['_BatchNorm'])
]``
"""
# Parameters to build each block:
# [kernel size, mid channels, out channels, with_se, act type, stride]
arch_settings = {
'small': [[3, 16, 16, True, 'ReLU', 2],
[3, 72, 24, False, 'ReLU', 2],
[3, 88, 24, False, 'ReLU', 1],
[5, 96, 40, True, 'HSwish', 2],
[5, 240, 40, True, 'HSwish', 1],
[5, 240, 40, True, 'HSwish', 1],
[5, 120, 48, True, 'HSwish', 1],
[5, 144, 48, True, 'HSwish', 1],
[5, 288, 96, True, 'HSwish', 2],
[5, 576, 96, True, 'HSwish', 1],
[5, 576, 96, True, 'HSwish', 1]],
'big': [[3, 16, 16, False, 'ReLU', 1],
[3, 64, 24, False, 'ReLU', 2],
[3, 72, 24, False, 'ReLU', 1],
[5, 72, 40, True, 'ReLU', 2],
[5, 120, 40, True, 'ReLU', 1],
[5, 120, 40, True, 'ReLU', 1],
[3, 240, 80, False, 'HSwish', 2],
[3, 200, 80, False, 'HSwish', 1],
[3, 184, 80, False, 'HSwish', 1],
[3, 184, 80, False, 'HSwish', 1],
[3, 480, 112, True, 'HSwish', 1],
[3, 672, 112, True, 'HSwish', 1],
[5, 672, 160, True, 'HSwish', 1],
[5, 672, 160, True, 'HSwish', 2],
[5, 960, 160, True, 'HSwish', 1]]
} # yapf: disable
def __init__(self,
arch='small',
conv_cfg=None,
norm_cfg=dict(type='BN'),
out_indices=(-1, ),
frozen_stages=-1,
norm_eval=False,
with_cp=False,
init_cfg=[
dict(type='Kaiming', layer=['Conv2d']),
dict(type='Constant', val=1, layer=['_BatchNorm'])
]):
# Protect mutable default arguments
norm_cfg = copy.deepcopy(norm_cfg)
super().__init__(init_cfg=init_cfg)
assert arch in self.arch_settings
for index in out_indices:
if index not in range(-len(self.arch_settings[arch]),
len(self.arch_settings[arch])):
raise ValueError('the item in out_indices must in '
f'range(0, {len(self.arch_settings[arch])}). '
f'But received {index}')
if frozen_stages not in range(-1, len(self.arch_settings[arch])):
raise ValueError('frozen_stages must be in range(-1, '
f'{len(self.arch_settings[arch])}). '
f'But received {frozen_stages}')
self.arch = arch
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.out_indices = out_indices
self.frozen_stages = frozen_stages
self.norm_eval = norm_eval
self.with_cp = with_cp
self.in_channels = 16
self.conv1 = ConvModule(
in_channels=3,
out_channels=self.in_channels,
kernel_size=3,
stride=2,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=dict(type='HSwish'))
self.layers = self.METHOD_NAME()
self.feat_dim = self.arch_settings[arch][-1][2]
def METHOD_NAME(self):
layers = []
layer_setting = self.arch_settings[self.arch]
for i, params in enumerate(layer_setting):
(kernel_size, mid_channels, out_channels, with_se, act,
stride) = params
if with_se:
se_cfg = dict(
channels=mid_channels,
ratio=4,
act_cfg=(dict(type='ReLU'),
dict(type='HSigmoid', bias=1.0, divisor=2.0)))
else:
se_cfg = None
layer = InvertedResidual(
in_channels=self.in_channels,
out_channels=out_channels,
mid_channels=mid_channels,
kernel_size=kernel_size,
stride=stride,
se_cfg=se_cfg,
with_expand_conv=True,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=dict(type=act),
with_cp=self.with_cp)
self.in_channels = out_channels
layer_name = f'layer{i + 1}'
self.add_module(layer_name, layer)
layers.append(layer_name)
return layers
def forward(self, x):
x = self.conv1(x)
outs = []
for i, layer_name in enumerate(self.layers):
layer = getattr(self, layer_name)
x = layer(x)
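            # out_indices may contain negative indices; i - len(self.layers) maps the running index onto them.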
if i in self.out_indices or \
i - len(self.layers) in self.out_indices:
outs.append(x)
return tuple(outs)
def _freeze_stages(self):
if self.frozen_stages >= 0:
for param in self.conv1.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
layer = getattr(self, f'layer{i}')
layer.eval()
for param in layer.parameters():
param.requires_grad = False
def train(self, mode=True):
super().train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval() |
41 | get slope inter | # emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the NiBabel package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Read / write access to SPM2 version of analyze image format"""
import numpy as np
from . import spm99analyze as spm99 # module import
image_dimension_dtd = spm99.image_dimension_dtd[:]
image_dimension_dtd[image_dimension_dtd.index(('funused2', 'f4'))] = ('scl_inter', 'f4')
# Full header numpy dtype combined across sub-fields
header_dtype = np.dtype(spm99.header_key_dtd + image_dimension_dtd + spm99.data_history_dtd)
class Spm2AnalyzeHeader(spm99.Spm99AnalyzeHeader):
"""Class for SPM2 variant of basic Analyze header
SPM2 variant adds the following to basic Analyze format:
* voxel origin;
* slope scaling of data;
* reading - but not writing - intercept of data.
"""
# Copies of module level definitions
template_dtype = header_dtype
def METHOD_NAME(self):
"""Get data scaling (slope) and intercept from header data
Uses the algorithm from SPM2 spm_vol_ana.m by John Ashburner
Parameters
----------
self : header
Mapping with fields:
* scl_slope - slope
* scl_inter - possible intercept (SPM2 use - shared by nifti)
* glmax - the (recorded) maximum value in the data (unscaled)
* glmin - recorded minimum unscaled value
* cal_max - the calibrated (scaled) maximum value in the dataset
* cal_min - ditto minimum value
Returns
-------
scl_slope : None or float
slope. None if there is no valid scaling from these fields
scl_inter : None or float
intercept. Also None if there is no valid slope, intercept
Examples
--------
>>> fields = {'scl_slope': 1, 'scl_inter': 0, 'glmax': 0, 'glmin': 0,
... 'cal_max': 0, 'cal_min': 0}
>>> hdr = Spm2AnalyzeHeader()
>>> for key, value in fields.items():
... hdr[key] = value
>>> hdr.get_slope_inter()
(1.0, 0.0)
>>> hdr['scl_inter'] = 0.5
>>> hdr.get_slope_inter()
(1.0, 0.5)
>>> hdr['scl_inter'] = np.nan
>>> hdr.get_slope_inter()
(1.0, 0.0)
If 'scl_slope' is 0, nan or inf, cannot use 'scl_slope'.
Without valid information in the gl / cal fields, we cannot get
scaling, and return None
>>> hdr['scl_slope'] = 0
>>> hdr.get_slope_inter()
(None, None)
>>> hdr['scl_slope'] = np.nan
>>> hdr.get_slope_inter()
(None, None)
Valid information in the gl AND cal fields are needed
>>> hdr['cal_max'] = 0.8
>>> hdr['cal_min'] = 0.2
>>> hdr.get_slope_inter()
(None, None)
>>> hdr['glmax'] = 110
>>> hdr['glmin'] = 10
>>> np.allclose(hdr.get_slope_inter(), [0.6/100, 0.2-0.6/100*10])
True
"""
# get scaling factor from 'scl_slope' (funused1)
slope = float(self['scl_slope'])
if np.isfinite(slope) and slope:
# try to get offset from scl_inter
inter = float(self['scl_inter'])
if not np.isfinite(inter):
inter = 0.0
return slope, inter
# no non-zero and finite scaling, try gl/cal fields
unscaled_range = self['glmax'] - self['glmin']
scaled_range = self['cal_max'] - self['cal_min']
if unscaled_range and scaled_range:
slope = float(scaled_range) / unscaled_range
inter = self['cal_min'] - slope * self['glmin']
return slope, inter
return None, None
@classmethod
def may_contain_header(klass, binaryblock):
if len(binaryblock) < klass.sizeof_hdr:
return False
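        # Accept the block as a plain Analyze header when the NIfTI magic is absent and sizeof_hdr reads 348 in either byte order.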
hdr_struct = np.ndarray(
shape=(), dtype=header_dtype, buffer=binaryblock[: klass.sizeof_hdr]
)
bs_hdr_struct = hdr_struct.byteswap()
return binaryblock[344:348] not in (b'ni1\x00', b'n+1\x00') and 348 in (
hdr_struct['sizeof_hdr'],
bs_hdr_struct['sizeof_hdr'],
)
class Spm2AnalyzeImage(spm99.Spm99AnalyzeImage):
"""Class for SPM2 variant of basic Analyze image"""
header_class = Spm2AnalyzeHeader
header: Spm2AnalyzeHeader
load = Spm2AnalyzeImage.from_filename
save = Spm2AnalyzeImage.instance_to_filename |
42 | test single with none value | '''
Faraday Penetration Test IDE
Copyright (C) 2013 Infobyte LLC (http://www.infobytesec.com/)
See the file 'doc/LICENSE' for the license information
'''
import time
import datetime
import pytest
from collections import namedtuple
from marshmallow import Schema, fields, ValidationError
from faraday.server.schemas import (
JSTimestampField,
NullToBlankString,
MutableField,
PrimaryKeyRelatedField,
SelfNestedField,
)
Place = namedtuple('Place', ['name', 'x', 'y'])
class PointSchema(Schema):
x = fields.Float(required=True)
y = fields.Float(required=True)
class PlaceSchema(Schema):
name = fields.Str()
coords = SelfNestedField(PointSchema())
class TestSelfNestedField:
def load(self, data, schema=PlaceSchema):
return schema().load(data)
def test_field_serialization(self):
point = Place('home', 123, 456.1)
schema = PlaceSchema()
dumped = schema.dump(point)
assert dumped == {"name": "home", "coords": {"x": 123.0, "y": 456.1}}
def test_deserialization_success(self):
load = PlaceSchema().load({"coords": {"x": 123.0, "y": 456.1}})
assert load == {"coords": {"x": 123.0, "y": 456.1}}
@pytest.mark.parametrize('data', [
{"coords": {"x": 1}},
{"coords": {"x": None, "y": 2}},
{"coords": {"x": "xxx", "y": 2}},
])
def test_deserialization_fails(self, data):
with pytest.raises(ValidationError):
self.load(data)
class TestJSTimestampField:
def test_parses_current_datetime(self):
ts = time.time()
dt = datetime.datetime.fromtimestamp(ts)
parsed = JSTimestampField()._serialize(dt, None, None)
assert parsed == int(ts) * 1000
assert isinstance(parsed, int)
def test_parses_null_datetime(self):
assert JSTimestampField()._serialize(None, None, None) is None
def test_deserialization_fails(self):
ts = time.time()
dt = datetime.datetime.fromtimestamp(ts)
loaded = JSTimestampField()._deserialize(ts * 1000,
None,
None)
assert isinstance(loaded, datetime.date)
assert abs(loaded - dt) < datetime.timedelta(seconds=60)
User = namedtuple('User', ['username', 'blogposts'])
Blogpost = namedtuple('Blogpost', ['id', 'title'])
Profile = namedtuple('Profile', ['user', 'first_name'])
class UserSchema(Schema):
username = fields.String()
blogposts = PrimaryKeyRelatedField(many=True)
class ProfileSchema(Schema):
user = PrimaryKeyRelatedField('username')
first_name = fields.String()
class TestPrimaryKeyRelatedField:
@pytest.fixture(autouse=True)
def load_data(self):
self.blogposts = [
Blogpost(1, 'aaa'),
Blogpost(2, 'bbb'),
Blogpost(3, 'ccc'),
]
self.user = User('test', self.blogposts)
self.profile = Profile(self.user, 'david')
def serialize(self, obj=None, schema=UserSchema):
return schema().dump(obj or self.user)
def test_many_id(self):
assert self.serialize() == {"username": "test",
"blogposts": [1, 2, 3]}
def test_many_title(self):
class UserSchemaWithTitle(UserSchema):
blogposts = PrimaryKeyRelatedField('title', many=True)
data = self.serialize(schema=UserSchemaWithTitle)
assert data == {"username": "test", "blogposts": ['aaa', 'bbb', 'ccc']}
def test_single(self):
assert self.serialize(self.profile, ProfileSchema) == {
"user": "test",
"first_name": "david"
}
def METHOD_NAME(self):
assert self.serialize(Profile(None, 'other'), ProfileSchema) == {
"user": None,
"first_name": "other"
}
def test_deserialization_fails(self):
with pytest.raises(NotImplementedError):
UserSchema().load({"username": "test",
"blogposts": [1, 2, 3]})
Blogpost2 = namedtuple('Blogpost', ['id', 'title', 'user'])
class Blogpost2Schema(Schema):
id = fields.Integer()
title = fields.String()
user = MutableField(fields.Nested(UserSchema, only=('username',)),
fields.String())
class TestMutableField:
serialized_data = {"id": 1, "title": "test", "user": {"username": "john"}}
loaded_data = {"id": 1, "title": "test", "user": "john"}
@pytest.fixture(autouse=True)
def load_data(self):
self.user = User('john', []) # I don't care for the user's blogposts
self.blogpost = Blogpost2(1, 'test', self.user)
def serialize(self, obj=None, schema=Blogpost2Schema):
return schema().dump(obj or self.blogpost)
def load(self, data, schema=Blogpost2Schema):
return schema().load(data)
def test_serialize(self):
assert self.serialize() == self.serialized_data
def test_deserialize(self):
assert self.load(self.loaded_data) == self.loaded_data
def test_deserialize_fails(self):
with pytest.raises(ValidationError):
self.load(self.serialized_data)
def test_required_propagation(self):
read_field = fields.String()
write_field = fields.Float()
mutable = MutableField(read_field, write_field, required=True)
assert mutable.required
assert read_field.required
assert write_field.required
def test_load_method_field(self):
class PlaceSchema(Schema):
name = fields.String()
x = MutableField(fields.Method('get_x'),
fields.String())
def get_x(self, obj):
return 5
assert self.serialize(Place('test', 1, 1), PlaceSchema) == {
"name": "test",
"x": 5,
}
class TestNullToBlankString:
class NullToBlankSchema(Schema):
string = NullToBlankString(missing='test')
def test_load_simple_string(self):
data = self.NullToBlankSchema().load({'string': 'hello'})
assert data['string'] == 'hello'
def test_load_string_with_null_bytes(self):
data = self.NullToBlankSchema().load({'string': 'hello\0world'})
assert data['string'] == 'helloworld'
def test_load_default_string(self):
data = self.NullToBlankSchema().load({})
assert data['string'] == 'test'
def test_translate_none_to_empty_string(self):
data = self.NullToBlankSchema().load({'string': None})
assert data['string'] == '' |
43 | print info | from lxml import html
import requests
from bs4 import BeautifulSoup
import sys
import os
import re
import time
REGEX = r'\s*([\d.]+)'
count = 0
# This script prints the vulnerability ID, description, severity, and link for every vulnerability affecting a dependency (and optional version) passed on the command line.
def usage(code=0):
print('''Usage: {} [options] component_name version
Choose a component and version to see any/all vulnerabilities
'''.format(os.path.basename(sys.argv[0])))
sys.exit(code)
# Download the page at the given link and return its parsed BeautifulSoup tree
def returnSoupItemsDesc(link):
results = requests.get(link)
resultsContent = results.content
    # parse the page content into a BeautifulSoup tree
soup = BeautifulSoup(resultsContent, 'xml')
return soup
def METHOD_NAME(soup_items, link):
print('Potential vulnerabilities found at ' + time.strftime("%Y-%m-%d %H:%M"))
cvss_versions = soup_items.find_all('span', attrs={'data-testid':'page-header-vuln-id'})
for version in cvss_versions:
print('vulnerability id: {}\n'.format(version.text))
descriptions = soup_items.find_all('p', attrs={'data-testid':'vuln-analysis-description'})
for description in descriptions:
print('description: {}\n'.format(description.text))
version3_severity = soup_items.find_all('span', attrs={'data-testid':'vuln-cvssv3-base-score-severity'})
if len(version3_severity):
for severity in version3_severity:
print('version 3 severity: {}\n'.format(severity.text))
version2_severity = soup_items.find_all('span', attrs={'data-testid':'vuln-cvssv2-base-score-severity'})
if len(version2_severity):
for severity in version2_severity:
print('version 2 severity: {}\n'.format(severity.text))
print ('link to full description: {}\n'.format(link))
def version_cmp(version1, version2):
def normalize(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
return cmp(normalize(version1), normalize(version2))
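# Python 3 removed the built-in cmp(); recreate it so it returns -1, 0, or 1.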
def cmp(a, b):
return (a > b) - (a < b)
def do_it_all(link):
    # 'count' is the module-level found-a-vulnerability flag; declare it global so the
    # assignments below update it instead of creating an unbound local variable.
    global count
soup_items = returnSoupItemsDesc(link)
links = soup_items.find_all('a')
#loops through all lines of html code with the <a> tag
for item in links:
if 'CVE' in item.text:
#constructs link for one of the vulnerabilities
cve_link = 'https://nvd.nist.gov{}'.format(item.get('href'))
cve_soup_items = returnSoupItemsDesc(cve_link)
rows = cve_soup_items.find_all('tr', class_='vulnerable')
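            # Each 'vulnerable' row lists an affected configuration; the version-range columns of the first and last rows are compared against the user's version below.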
if(len(rows)>0):
last_columns = rows[len(rows)-1].findChildren('td')
num_columns = len(last_columns)
if dependency in last_columns[0].text:
#no version
if(no_version):
METHOD_NAME(cve_soup_items, cve_link)
count = 1
#check version from column 1 (no 'up to' or 'from' columns)
elif (num_columns<2 or num_columns>3):
version_block = last_columns[0].text
try:
                            version = re.search(r'\s*([\d.]+).*?(\s*([\d.]+))', version_block).group(2)
if(version_cmp(version,user_version)>=0):
METHOD_NAME(cve_soup_items, cve_link)
count = 1
except IndexError:
METHOD_NAME(cve_soup_items, cve_link)
count = 1
elif (num_columns ==2):
version_block = last_columns[1].text
#\s*([\d.]+)
version = re.search(REGEX, version_block).group(1)
inc_or_exc = re.search('(inc|exc)', version_block).group(1)
if (inc_or_exc == 'inc'):
if (version_cmp(version,user_version)>=0):
METHOD_NAME(cve_soup_items, cve_link)
count = 1
elif (inc_or_exc == 'exc'):
if (version_cmp(version,user_version)>0):
METHOD_NAME(cve_soup_items, cve_link)
count = 1
else:
version_block = last_columns[2].text
#\s*([\d.]+)
version_high = re.search(REGEX, version_block).group(1)
version_block_first = rows[0].findChildren('td')[1]
version_low = re.search(REGEX, version_block_first.text).group(1)
end = False
#if user_version is outside of version range
if(version_cmp(version_high,user_version)<0 or version_cmp(version_low, user_version)>0):
end = True
#not outside of range and only one row
elif(len(rows)==1):
METHOD_NAME(cve_soup_items, cve_link)
count = 1
end = True
#more than 1 row
current_row = 0
current_col = 1
# print('2 +rows')
while not end:
columns = rows[current_row].findChildren('td')
#version less than up to of first row
if version_cmp(re.search(REGEX,columns[current_col+1].text).group(1),user_version)>0:
METHOD_NAME(cve_soup_items, cve_link)
count = 1
end = True
#version less than from
elif version_cmp(re.search(REGEX,rows[current_row+1].findChildren('td')[current_col].text).group(1),user_version)>0:
end = True
#check next row
else:
current_row = current_row + 1
if count == 0:
print('No potential vulnerabilities found at ' + time.strftime("%Y-%m-%d %H:%M"))
if len(sys.argv[1:]) == 2:
dependency = sys.argv[1]
user_version = sys.argv[2]
link= 'https://nvd.nist.gov/vuln/search/results?form_type=Basic&results_type=overview&query={}&search_type=all'.format(dependency)
dependency=dependency.replace("+","_")
no_version = False
elif len(sys.argv[1:])==1:
dependency = sys.argv[1]
link= 'https://nvd.nist.gov/vuln/search/results?form_type=Basic&results_type=overview&query={}&search_type=all'.format(dependency)
dependency=dependency.replace("+","_")
no_version = True
else:
usage(1)
if __name__ == '__main__':
do_it_all(link)
sys.exit(0) |
44 | get field | # -*- coding: utf-8 -*-
# Copyright (c) 2022, Jonathan Lung <lungj@heresjono.com>
# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
# SPDX-License-Identifier: GPL-3.0-or-later
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
name: bitwarden
author:
- Jonathan Lung (@lungj) <lungj@heresjono.com>
requirements:
- bw (command line utility)
- be logged into bitwarden
- bitwarden vault unlocked
- E(BW_SESSION) environment variable set
short_description: Retrieve secrets from Bitwarden
version_added: 5.4.0
description:
- Retrieve secrets from Bitwarden.
options:
_terms:
description: Key(s) to fetch values for from login info.
required: true
type: list
elements: str
search:
description: Field to retrieve, for example V(name) or V(id).
type: str
default: name
version_added: 5.7.0
field:
description: Field to fetch. Leave unset to fetch whole response.
type: str
collection_id:
description: Collection ID to filter results by collection. Leave unset to skip filtering.
type: str
version_added: 6.3.0
"""
EXAMPLES = """
- name: "Get 'password' from Bitwarden record named 'a_test'"
ansible.builtin.debug:
msg: >-
{{ lookup('community.general.bitwarden', 'a_test', field='password') }}
- name: "Get 'password' from Bitwarden record with id 'bafba515-af11-47e6-abe3-af1200cd18b2'"
ansible.builtin.debug:
msg: >-
{{ lookup('community.general.bitwarden', 'bafba515-af11-47e6-abe3-af1200cd18b2', search='id', field='password') }}
- name: "Get 'password' from Bitwarden record named 'a_test' from collection"
ansible.builtin.debug:
msg: >-
{{ lookup('community.general.bitwarden', 'a_test', field='password', collection_id='bafba515-af11-47e6-abe3-af1200cd18b2') }}
- name: "Get full Bitwarden record named 'a_test'"
ansible.builtin.debug:
msg: >-
{{ lookup('community.general.bitwarden', 'a_test') }}
- name: "Get custom field 'api_key' from Bitwarden record named 'a_test'"
ansible.builtin.debug:
msg: >-
{{ lookup('community.general.bitwarden', 'a_test', field='api_key') }}
"""
RETURN = """
_raw:
description: List of requested field values or, if no field was given, the JSON objects of the matching records.
type: list
elements: raw
"""
from subprocess import Popen, PIPE
from ansible.errors import AnsibleError
from ansible.module_utils.common.text.converters import to_bytes, to_text
from ansible.parsing.ajson import AnsibleJSONDecoder
from ansible.plugins.lookup import LookupBase
class BitwardenException(AnsibleError):
pass
class Bitwarden(object):
def __init__(self, path='bw'):
self._cli_path = path
@property
def cli_path(self):
return self._cli_path
@property
def unlocked(self):
out, err = self._run(['status'], stdin="")
decoded = AnsibleJSONDecoder().raw_decode(out)[0]
return decoded['status'] == 'unlocked'
def _run(self, args, stdin=None, expected_rc=0):
p = Popen([self.cli_path] + args, stdout=PIPE, stderr=PIPE, stdin=PIPE)
out, err = p.communicate(to_bytes(stdin))
rc = p.wait()
if rc != expected_rc:
raise BitwardenException(err)
return to_text(out, errors='surrogate_or_strict'), to_text(err, errors='surrogate_or_strict')
def _get_matches(self, search_value, search_field, collection_id):
"""Return matching records whose search_field is equal to key.
"""
# Prepare set of params for Bitwarden CLI
params = ['list', 'items', '--search', search_value]
if collection_id:
params.extend(['--collectionid', collection_id])
out, err = self._run(params)
# This includes things that matched in different fields.
initial_matches = AnsibleJSONDecoder().raw_decode(out)[0]
# Filter to only include results from the right field.
return [item for item in initial_matches if item[search_field] == search_value]
def METHOD_NAME(self, field, search_value, search_field="name", collection_id=None):
"""Return a list of the specified field for records whose search_field match search_value
and filtered by collection if collection has been provided.
If field is None, return the whole record for each match.
"""
matches = self._get_matches(search_value, search_field, collection_id)
if not field:
return matches
field_matches = []
for match in matches:
# if there are no custom fields, then `match` has no key 'fields'
if 'fields' in match:
custom_field_found = False
for custom_field in match['fields']:
if field == custom_field['name']:
field_matches.append(custom_field['value'])
custom_field_found = True
break
if custom_field_found:
continue
if 'login' in match and field in match['login']:
field_matches.append(match['login'][field])
continue
if field in match:
field_matches.append(match[field])
continue
if matches and not field_matches:
raise AnsibleError("field {field} does not exist in {search_value}".format(field=field, search_value=search_value))
return field_matches
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
self.set_options(var_options=variables, direct=kwargs)
field = self.get_option('field')
search_field = self.get_option('search')
collection_id = self.get_option('collection_id')
if not _bitwarden.unlocked:
raise AnsibleError("Bitwarden Vault locked. Run 'bw unlock'.")
return [_bitwarden.METHOD_NAME(field, term, search_field, collection_id) for term in terms]
_bitwarden = Bitwarden() |
45 | setup method | # coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
# TEST SCENARIO COVERAGE
# ----------------------
# Methods Total : 9
# Methods Covered : 9
# Examples Total : 12
# Examples Tested : 12
# Coverage % : 100
# ----------------------
# firewall_policies: 5/5
# firewall_policy_rule_groups: 4/4
import unittest
import pytest
import azure.mgmt.network
from devtools_testutils import AzureMgmtRecordedTestCase, RandomNameResourceGroupPreparer, recorded_by_proxy
AZURE_LOCATION = 'eastus'
class TestMgmtNetwork(AzureMgmtRecordedTestCase):
def METHOD_NAME(self, method):
self.mgmt_client = self.create_mgmt_client(
azure.mgmt.network.NetworkManagementClient
)
@RandomNameResourceGroupPreparer(location=AZURE_LOCATION)
@recorded_by_proxy
def test_network(self, resource_group):
SUBSCRIPTION_ID = self.get_settings_value("SUBSCRIPTION_ID")
RESOURCE_GROUP = resource_group.name
FIREWALL_POLICY_NAME = "myFirewallPolicy"
RULE_GROUP_NAME = "myRuleGroup"
# /FirewallPolicies/put/Create FirewallPolicy[put]
BODY = {
"tags": {
"key1": "value1"
},
"location": "West US",
"threat_intel_mode": "Alert"
}
result = self.mgmt_client.firewall_policies.begin_create_or_update(resource_group_name=RESOURCE_GROUP, firewall_policy_name=FIREWALL_POLICY_NAME, parameters=BODY)
result = result.result()
# # /FirewallPolicyRuleGroups/put/Create FirewallPolicyRuleGroup With IpGroups[put]
# BODY = {
# "priority": "110",
# "rules": [
# {
# "rule_type": "FirewallPolicyFilterRule",
# "name": "Example-Filter-Rule",
# "action": {
# "type": "Deny"
# },
# "rule_conditions": [
# {
# "rule_condition_type": "NetworkRuleCondition",
# "name": "network-condition1",
# "ip_protocols": [
# "TCP"
# ],
# "destination_ports": [
# "*"
# ],
# "source_ip_groups": [
# "/subscriptions/subid/providers/Microsoft.Network/resourceGroup/rg1/ipGroups/ipGroups1"
# ],
# "destination_ip_groups": [
# "/subscriptions/subid/providers/Microsoft.Network/resourceGroup/rg1/ipGroups/ipGroups2"
# ]
# }
# ]
# }
# ]
# }
# result = self.mgmt_client.firewall_policy_rule_groups.create_or_update(resource_group_name=RESOURCE_GROUP, firewall_policy_name=FIREWALL_POLICY_NAME, rule_group_name=RULE_GROUP_NAME, parameters=BODY)
# result = result.result()
# /FirewallPolicyRuleGroups/put/Create FirewallPolicyRuleGroup[put]
BODY = {
"priority": "110",
"rules": [
{
"rule_type": "FirewallPolicyFilterRule",
"name": "Example-Filter-Rule",
"action": {
"type": "Deny"
},
"rule_conditions": [
{
"rule_condition_type": "NetworkRuleCondition",
"name": "network-condition1",
"source_addresses": [
"10.1.25.0/24"
],
"destination_addresses": [
"*"
],
"ip_protocols": [
"TCP"
],
"destination_ports": [
"*"
]
}
]
}
]
}
result = self.mgmt_client.firewall_policy_rule_groups.begin_create_or_update(resource_group_name=RESOURCE_GROUP, firewall_policy_name=FIREWALL_POLICY_NAME, rule_group_name=RULE_GROUP_NAME, parameters=BODY)
result = result.result()
# /FirewallPolicyRuleGroups/get/Get FirewallPolicyRuleGroup With IpGroups[get]
# result = self.mgmt_client.firewall_policy_rule_groups.get(resource_group_name=RESOURCE_GROUP, firewall_policy_name=FIREWALL_POLICY_NAME, rule_group_name=RULE_GROUP_NAME)
# /FirewallPolicyRuleGroups/get/Get FirewallPolicyRuleGroup[get]
result = self.mgmt_client.firewall_policy_rule_groups.get(resource_group_name=RESOURCE_GROUP, firewall_policy_name=FIREWALL_POLICY_NAME, rule_group_name=RULE_GROUP_NAME)
# /FirewallPolicyRuleGroups/get/List all FirewallPolicyRuleGroups with IpGroups for a given FirewallPolicy[get]
# result = self.mgmt_client.firewall_policy_rule_groups.list(resource_group_name=RESOURCE_GROUP, firewall_policy_name=FIREWALL_POLICY_NAME)
# /FirewallPolicyRuleGroups/get/List all FirewallPolicyRuleGroups for a given FirewallPolicy[get]
result = self.mgmt_client.firewall_policy_rule_groups.list(resource_group_name=RESOURCE_GROUP, firewall_policy_name=FIREWALL_POLICY_NAME)
# /FirewallPolicies/get/Get FirewallPolicy[get]
result = self.mgmt_client.firewall_policies.get(resource_group_name=RESOURCE_GROUP, firewall_policy_name=FIREWALL_POLICY_NAME)
# /FirewallPolicies/get/List all Firewall Policies for a given resource group[get]
result = self.mgmt_client.firewall_policies.list(resource_group_name=RESOURCE_GROUP)
# /FirewallPolicies/get/List all Firewall Policies for a given subscription[get]
result = self.mgmt_client.firewall_policies.list_all()
# /FirewallPolicyRuleGroups/delete/Delete FirewallPolicyRuleGroup[delete]
result = self.mgmt_client.firewall_policy_rule_groups.begin_delete(resource_group_name=RESOURCE_GROUP, firewall_policy_name=FIREWALL_POLICY_NAME, rule_group_name=RULE_GROUP_NAME)
result = result.result()
# /FirewallPolicies/delete/Delete Firewall Policy[delete]
result = self.mgmt_client.firewall_policies.begin_delete(resource_group_name=RESOURCE_GROUP, firewall_policy_name=FIREWALL_POLICY_NAME)
result = result.result()
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main() |
46 | all time | from __future__ import annotations
import datetime
import logging
from dataclasses import dataclass
from typing import Optional
import pandas as pd
from dbt_semantic_interfaces.dataclass_serialization import SerializableDataclass
from dbt_semantic_interfaces.type_enums.time_granularity import TimeGranularity
from metricflow.time.time_granularity import offset_period
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class TimeRangeConstraint(SerializableDataclass):
"""Describes how the time dimension for metrics should be constrained."""
start_time: datetime.datetime
end_time: datetime.datetime
def __post_init__(self) -> None: # noqa: D
if self.start_time > self.end_time:
logger.warning(f"start_time must not be > end_time. start_time={self.start_time} end_time={self.end_time}")
if self.start_time < TimeRangeConstraint.ALL_TIME_BEGIN():
logger.warning(f"start_time={self.start_time} exceeds the limits of {TimeRangeConstraint.ALL_TIME_BEGIN()}")
if self.end_time > TimeRangeConstraint.ALL_TIME_END():
raise RuntimeError(f"end_time={self.end_time} exceeds the limits of {TimeRangeConstraint.ALL_TIME_END()}")
@staticmethod
def ALL_TIME_BEGIN() -> datetime.datetime: # noqa: D
return datetime.datetime(2000, 1, 1)
@staticmethod
def ALL_TIME_END() -> datetime.datetime: # noqa: D
return datetime.datetime(2040, 12, 31)
@staticmethod
def METHOD_NAME() -> TimeRangeConstraint:
"""Return the range representing all time.
This could also be represented with None as the ends, but doing this makes the logic simpler in many cases.
"""
return TimeRangeConstraint(
start_time=TimeRangeConstraint.ALL_TIME_BEGIN(),
end_time=TimeRangeConstraint.ALL_TIME_END(),
)
@staticmethod
def empty_time() -> TimeRangeConstraint:
"""Return the range representing no time."""
return TimeRangeConstraint(
start_time=TimeRangeConstraint.ALL_TIME_BEGIN(),
end_time=TimeRangeConstraint.ALL_TIME_BEGIN(),
)
def _adjust_time_constraint_start_by_window(
self,
time_granularity: TimeGranularity,
time_unit_count: int,
) -> TimeRangeConstraint:
"""Moves the start of the time constraint back by 1 window.
E.g. if the metric is weekly-active-users (i.e. window = 1 week), this moves time_constraint.start one week earlier.
"""
start_ts = pd.Timestamp(self.start_time)
offset = offset_period(time_granularity) * time_unit_count
adjusted_start = (start_ts - offset).to_pydatetime()
return TimeRangeConstraint(
start_time=adjusted_start,
end_time=self.end_time,
)
def adjust_time_constraint_for_cumulative_metric(
self, granularity: Optional[TimeGranularity], count: int
) -> TimeRangeConstraint:
"""Given a time constraint for the overall query, adjust it to cover the time range for this metric."""
if granularity is not None:
return self._adjust_time_constraint_start_by_window(granularity, count - 1)
# if no window is specified we want to accumulate from the beginning of time
return TimeRangeConstraint(
start_time=TimeRangeConstraint.ALL_TIME_BEGIN(),
end_time=self.end_time,
)
def is_subset_of(self, other: TimeRangeConstraint) -> bool: # noqa: D
return self.start_time >= other.start_time and self.end_time <= other.end_time
def __str__(self) -> str: # noqa: D
return f"[{self.start_time.isoformat()}, {self.end_time.isoformat()}]"
def __repr__(self) -> str: # noqa: D
return (
f"{self.__class__.__name__}(start_time='{self.start_time.isoformat()}', "
f"end_time='{self.end_time.isoformat()}')"
)
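# Illustrative outcomes of intersection() below (hypothetical dates):
#   [2020-01-01, 2020-06-30] vs [2020-04-01, 2020-12-31] -> [2020-04-01, 2020-06-30]
#   [2020-01-01, 2020-02-01] vs [2020-03-01, 2020-04-01] -> empty_time()
#   a constraint fully contained in the other is returned unchanged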
def intersection(self, other: TimeRangeConstraint) -> TimeRangeConstraint: # noqa: D
# self is completely before the other
if self.end_time < other.start_time:
return TimeRangeConstraint.empty_time()
# self starts before the start of other, and self ends within other
elif self.start_time <= other.start_time <= self.end_time <= other.end_time:
return TimeRangeConstraint(
start_time=other.start_time,
end_time=self.end_time,
)
# self starts before the start of other, and self ends after other
elif self.start_time <= other.start_time <= other.end_time <= self.end_time:
return other
# self starts after the start of other, and self ends within other:
elif other.start_time <= self.start_time <= self.end_time <= other.end_time:
return self
# self starts after the start of other, and self ends after other:
elif other.start_time <= self.start_time <= other.end_time <= self.end_time:
return TimeRangeConstraint(
start_time=self.start_time,
end_time=other.end_time,
)
# self is completely after other
elif self.start_time > other.end_time:
return TimeRangeConstraint.empty_time()
else:
raise RuntimeError(f"Unhandled case - self: {self} other: {other}") |
47 | get column number | """Different kinds of SAX Exceptions"""
import sys
if sys.platform[:4] == "java":
from java.lang import Exception
del sys
# ===== SAXEXCEPTION =====
class SAXException(Exception):
"""Encapsulate an XML error or warning. This class can contain
basic error or warning information from either the XML parser or
the application: you can subclass it to provide additional
functionality, or to add localization. Note that although you will
receive a SAXException as the argument to the handlers in the
ErrorHandler interface, you are not actually required to raise
the exception; instead, you can simply read the information in
it."""
def __init__(self, msg, exception=None):
"""Creates an exception. The message is required, but the exception
is optional."""
self._msg = msg
self._exception = exception
Exception.__init__(self, msg)
def getMessage(self):
"Return a message for this exception."
return self._msg
def getException(self):
"Return the embedded exception, or None if there was none."
return self._exception
def __str__(self):
"Create a string representation of the exception."
return self._msg
def __getitem__(self, ix):
"""Avoids weird error messages if someone does exception[ix] by
mistake, since Exception has __getitem__ defined."""
raise AttributeError("__getitem__")
# ===== SAXPARSEEXCEPTION =====
class SAXParseException(SAXException):
"""Encapsulate an XML parse error or warning.
This exception will include information for locating the error in
the original XML document. Note that although the application will
receive a SAXParseException as the argument to the handlers in the
ErrorHandler interface, the application is not actually required
to raise the exception; instead, it can simply read the
information in it and take a different action.
Since this exception is a subclass of SAXException, it inherits
the ability to wrap another exception."""
def __init__(self, msg, exception, locator):
"Creates the exception. The exception parameter is allowed to be None."
SAXException.__init__(self, msg, exception)
self._locator = locator
# We need to cache this stuff at construction time.
# If this exception is raised, the objects through which we must
# traverse to get this information may be deleted by the time
# it gets caught.
self._systemId = self._locator.getSystemId()
self._colnum = self._locator.METHOD_NAME()
self._linenum = self._locator.getLineNumber()
def METHOD_NAME(self):
"""The column number of the end of the text where the exception
occurred."""
return self._colnum
def getLineNumber(self):
"The line number of the end of the text where the exception occurred."
return self._linenum
def getPublicId(self):
"Get the public identifier of the entity where the exception occurred."
return self._locator.getPublicId()
def getSystemId(self):
"Get the system identifier of the entity where the exception occurred."
return self._systemId
def __str__(self):
"Create a string representation of the exception."
sysid = self.getSystemId()
if sysid is None:
sysid = "<unknown>"
linenum = self.getLineNumber()
if linenum is None:
linenum = "?"
colnum = self.METHOD_NAME()
if colnum is None:
colnum = "?"
return "%s:%s:%s: %s" % (sysid, linenum, colnum, self._msg)
# ===== SAXNOTRECOGNIZEDEXCEPTION =====
class SAXNotRecognizedException(SAXException):
"""Exception class for an unrecognized identifier.
An XMLReader will raise this exception when it is confronted with an
unrecognized feature or property. SAX applications and extensions may
use this class for similar purposes."""
# ===== SAXNOTSUPPORTEDEXCEPTION =====
class SAXNotSupportedException(SAXException):
"""Exception class for an unsupported operation.
An XMLReader will raise this exception when a service it cannot
perform is requested (specifically setting a state or value). SAX
applications and extensions may use this class for similar
purposes."""
# ===== SAXNOTSUPPORTEDEXCEPTION =====
class SAXReaderNotAvailable(SAXNotSupportedException):
"""Exception class for a missing driver.
An XMLReader module (driver) should raise this exception when it
is first imported, e.g. when a support module cannot be imported.
It also may be raised during parsing, e.g. if executing an external
program is not permitted.""" |
48 | test multiple markdown entries to jsonl | # Standard Packages
import json
from pathlib import Path
import os
# Internal Packages
from khoj.processor.markdown.markdown_to_jsonl import MarkdownToJsonl
from khoj.utils.fs_syncer import get_markdown_files
from khoj.utils.rawconfig import TextContentConfig
def test_markdown_file_with_no_headings_to_jsonl(tmp_path):
"Convert files with no heading to jsonl."
# Arrange
entry = f"""
- Bullet point 1
- Bullet point 2
"""
data = {
f"{tmp_path}": entry,
}
expected_heading = f"# {tmp_path.stem}"
# Act
# Extract Entries from specified Markdown files
entry_nodes, file_to_entries = MarkdownToJsonl.extract_markdown_entries(markdown_files=data)
# Process Each Entry from All Notes Files
jsonl_string = MarkdownToJsonl.convert_markdown_maps_to_jsonl(
MarkdownToJsonl.convert_markdown_entries_to_maps(entry_nodes, file_to_entries)
)
jsonl_data = [json.loads(json_string) for json_string in jsonl_string.splitlines()]
# Assert
assert len(jsonl_data) == 1
# Ensure raw entry with no headings do not get heading prefix prepended
assert not jsonl_data[0]["raw"].startswith("#")
# Ensure compiled entry has filename prepended as top level heading
assert jsonl_data[0]["compiled"].startswith(expected_heading)
def test_single_markdown_entry_to_jsonl(tmp_path):
"Convert markdown entry from single file to jsonl."
# Arrange
entry = f"""### Heading
\t\r
Body Line 1
"""
data = {
f"{tmp_path}": entry,
}
# Act
# Extract Entries from specified Markdown files
entries, entry_to_file_map = MarkdownToJsonl.extract_markdown_entries(markdown_files=data)
# Process Each Entry from All Notes Files
jsonl_string = MarkdownToJsonl.convert_markdown_maps_to_jsonl(
MarkdownToJsonl.convert_markdown_entries_to_maps(entries, entry_to_file_map)
)
jsonl_data = [json.loads(json_string) for json_string in jsonl_string.splitlines()]
# Assert
assert len(jsonl_data) == 1
def METHOD_NAME(tmp_path):
"Convert multiple markdown entries from single file to jsonl."
# Arrange
entry = f"""
### Heading 1
\t\r
Heading 1 Body Line 1
### Heading 2
\t\r
Heading 2 Body Line 2
"""
data = {
f"{tmp_path}": entry,
}
# Act
# Extract Entries from specified Markdown files
entry_strings, entry_to_file_map = MarkdownToJsonl.extract_markdown_entries(markdown_files=data)
entries = MarkdownToJsonl.convert_markdown_entries_to_maps(entry_strings, entry_to_file_map)
# Process Each Entry from All Notes Files
jsonl_string = MarkdownToJsonl.convert_markdown_maps_to_jsonl(entries)
jsonl_data = [json.loads(json_string) for json_string in jsonl_string.splitlines()]
# Assert
assert len(jsonl_data) == 2
# Ensure entry compiled strings include the markdown files they originate from
assert all([tmp_path.stem in entry.compiled for entry in entries])
def test_get_markdown_files(tmp_path):
"Ensure Markdown files specified via input-filter, input-files extracted"
# Arrange
# Include via input-filter globs
group1_file1 = create_file(tmp_path, filename="group1-file1.md")
group1_file2 = create_file(tmp_path, filename="group1-file2.md")
group2_file1 = create_file(tmp_path, filename="group2-file1.markdown")
group2_file2 = create_file(tmp_path, filename="group2-file2.markdown")
# Include via input-file field
file1 = create_file(tmp_path, filename="notes.md")
# Not included by any filter
create_file(tmp_path, filename="not-included-markdown.md")
create_file(tmp_path, filename="not-included-text.txt")
expected_files = set(
[os.path.join(tmp_path, file.name) for file in [group1_file1, group1_file2, group2_file1, group2_file2, file1]]
)
# Setup input-files, input-filters
input_files = [tmp_path / "notes.md"]
input_filter = [tmp_path / "group1*.md", tmp_path / "group2*.markdown"]
markdown_config = TextContentConfig(
input_files=input_files,
input_filter=[str(filter) for filter in input_filter],
compressed_jsonl=tmp_path / "test.jsonl",
embeddings_file=tmp_path / "test_embeddings.jsonl",
)
# Act
extracted_org_files = get_markdown_files(markdown_config)
# Assert
assert len(extracted_org_files) == 5
assert set(extracted_org_files.keys()) == expected_files
def test_extract_entries_with_different_level_headings(tmp_path):
"Extract markdown entries with different level headings."
# Arrange
entry = f"""
# Heading 1
## Heading 2
"""
data = {
f"{tmp_path}": entry,
}
# Act
# Extract Entries from specified Markdown files
entries, _ = MarkdownToJsonl.extract_markdown_entries(markdown_files=data)
# Assert
assert len(entries) == 2
assert entries[0] == "# Heading 1"
assert entries[1] == "## Heading 2"
# Helper Functions
def create_file(tmp_path: Path, entry=None, filename="test.md"):
markdown_file = tmp_path / filename
markdown_file.touch()
if entry:
markdown_file.write_text(entry)
return markdown_file |
49 | configure system disk | import logging
from virttest import qemu_storage
from virttest import data_dir
from virttest import utils_disk
from provider import backup_utils
from provider import job_utils
from provider.virt_storage.storage_admin import sp_admin
LOG_JOB = logging.getLogger('avocado.test')
class BlockDevCommitTest(object):
def __init__(self, test, params, env):
self.env = env
self.test = test
self.params = params
self.disks_info = list()
self.files_info = list()
self.main_vm = self.prepare_main_vm()
@staticmethod
def get_node_name(tag):
return "drive_%s" % tag
def prepare_main_vm(self):
return self.env.get_vm(self.params["main_vm"])
def get_image_by_tag(self, name):
image_dir = data_dir.get_data_dir()
image_params = self.params.object_params(name)
return qemu_storage.QemuImg(image_params, image_dir, name)
def prepare_snapshot_file(self, snapshot_tags):
self.snapshot_images = list(
map(self.get_image_by_tag, snapshot_tags))
params = self.params.copy()
params.setdefault("target_path", data_dir.get_data_dir())
for tag in snapshot_tags:
image = sp_admin.volume_define_by_params(tag, params)
image.hotplug(self.main_vm)
def verify_data_file(self):
for info in self.files_info:
mount_point, filename = info[0], info[1]
backup_utils.verify_file_md5(
self.main_vm, mount_point, filename)
def create_snapshots(self, snapshot_tags, device):
options = ["node", "overlay"]
cmd = "blockdev-snapshot"
for idx, tag in enumerate(snapshot_tags):
params = self.params.object_params(tag)
arguments = params.copy_from_keys(options)
arguments["overlay"] = self.get_node_name(tag)
if idx == 0:
arguments["node"] = self.device_node
else:
arguments["node"] = self.get_node_name(
snapshot_tags[idx - 1])
self.main_vm.monitor.cmd(cmd, dict(arguments))
for info in self.disks_info:
if device in info:
self.generate_tempfile(info[1], tag)
def commit_snapshots(self):
job_id_list = []
for device in self.params["device_tag"].split():
device_params = self.params.object_params(device)
snapshot_tags = device_params["snapshot_tags"].split()
self.device_node = self.get_node_name(device)
options = ["base-node", "top-node", "speed"]
arguments = self.params.copy_from_keys(options)
arguments["base-node"] = self.get_node_name(device)
arguments["top-node"] = self.get_node_name(snapshot_tags[-2])
device = self.get_node_name(snapshot_tags[-1])
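# block-commit merges snapshot data downwards: everything from 'top-node'
# (here the second-to-last overlay) down to 'base-node' (the original image
# node) is written into the base, while the job itself runs against the
# active layer (the last snapshot), assigned to 'device' just above.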
if len(self.params["device_tag"].split()) == 1:
backup_utils.block_commit(self.main_vm, device, **arguments)
else:
commit_cmd = backup_utils.block_commit_qmp_cmd
cmd, args = commit_cmd(device, **arguments)
job_id = args.get("job-id", device)
job_id_list.append(job_id)
self.main_vm.monitor.cmd(cmd, args)
for job_id in job_id_list:
job_utils.wait_until_block_job_completed(self.main_vm, job_id)
@staticmethod
def get_linux_disk_path(session, disk_size):
disks = utils_disk.get_linux_disks(session, True)
for kname, attr in disks.items():
if attr[1] == disk_size and attr[2] == "disk":
return kname
return None
def configure_disk(self, tag):
"""
Support configuration of both the system disk and data disks.
"""
if tag == self.params["images"].split()[0]:
self.METHOD_NAME(tag)
else:
self.configure_data_disk(tag)
def METHOD_NAME(self, tag):
self.disks_info.append(["", self.params["mount_point"], tag])
def configure_data_disk(self, tag):
os_type = self.params["os_type"]
disk_params = self.params.object_params(tag)
disk_size = disk_params["image_size"]
session = self.main_vm.wait_for_login()
try:
if os_type != "windows":
disk_id = self.get_linux_disk_path(session, disk_size)
assert disk_id, "Disk not found in guest!"
mount_point = utils_disk.configure_empty_linux_disk(
session, disk_id, disk_size)[0]
self.disks_info.append([
r"/dev/%s1" %
disk_id, mount_point, tag])
else:
disk_id = utils_disk.get_windows_disks_index(
session, disk_size)
driver_letter = utils_disk.configure_empty_windows_disk(
session, disk_id, disk_size)[0]
mount_point = r"%s:\\" % driver_letter
self.disks_info.append([disk_id, mount_point, tag])
finally:
session.close()
def generate_tempfile(self, root_dir, filename="data",
size="10M", timeout=360):
backup_utils.generate_tempfile(
self.main_vm, root_dir, filename, size, timeout)
self.files_info.append([root_dir, filename])
def pre_test(self):
if not self.main_vm.is_alive():
self.main_vm.create()
self.main_vm.verify_alive()
for device in self.params["device_tag"].split():
device_params = self.params.object_params(device)
snapshot_tags = device_params["snapshot_tags"].split()
self.device_node = self.get_node_name(device)
self.configure_disk(device)
self.prepare_snapshot_file(snapshot_tags)
self.create_snapshots(snapshot_tags, device)
def post_test(self):
try:
self.main_vm.destroy()
for image in self.snapshot_images:
image.remove()
except Exception as error:
LOG_JOB.error(str(error))
def run_test(self):
self.pre_test()
try:
self.commit_snapshots()
self.verify_data_file()
finally:
self.post_test() |
50 | tags | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetSessionResult',
'AwaitableGetSessionResult',
'get_session',
'get_session_output',
]
@pulumi.output_type
class GetSessionResult:
"""
The integration account session.
"""
def __init__(__self__, changed_time=None, content=None, created_time=None, id=None, location=None, name=None, METHOD_NAME=None, type=None):
if changed_time and not isinstance(changed_time, str):
raise TypeError("Expected argument 'changed_time' to be a str")
pulumi.set(__self__, "changed_time", changed_time)
if content and not isinstance(content, dict):
raise TypeError("Expected argument 'content' to be a dict")
pulumi.set(__self__, "content", content)
if created_time and not isinstance(created_time, str):
raise TypeError("Expected argument 'created_time' to be a str")
pulumi.set(__self__, "created_time", created_time)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if METHOD_NAME and not isinstance(METHOD_NAME, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", METHOD_NAME)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="changedTime")
def changed_time(self) -> str:
"""
The changed time.
"""
return pulumi.get(self, "changed_time")
@property
@pulumi.getter
def content(self) -> Optional[Any]:
"""
The session content.
"""
return pulumi.get(self, "content")
@property
@pulumi.getter(name="createdTime")
def created_time(self) -> str:
"""
The created time.
"""
return pulumi.get(self, "created_time")
@property
@pulumi.getter
def id(self) -> str:
"""
The resource id.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
The resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Gets the resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def METHOD_NAME(self) -> Optional[Mapping[str, str]]:
"""
The resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Gets the resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetSessionResult(GetSessionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSessionResult(
changed_time=self.changed_time,
content=self.content,
created_time=self.created_time,
id=self.id,
location=self.location,
name=self.name,
METHOD_NAME=self.METHOD_NAME,
type=self.type)
def get_session(integration_account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
session_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSessionResult:
"""
Gets an integration account session.
:param str integration_account_name: The integration account name.
:param str resource_group_name: The resource group name.
:param str session_name: The integration account session name.
"""
__args__ = dict()
__args__['integrationAccountName'] = integration_account_name
__args__['resourceGroupName'] = resource_group_name
__args__['sessionName'] = session_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:logic/v20160601:getSession', __args__, opts=opts, typ=GetSessionResult).value
return AwaitableGetSessionResult(
changed_time=pulumi.get(__ret__, 'changed_time'),
content=pulumi.get(__ret__, 'content'),
created_time=pulumi.get(__ret__, 'created_time'),
id=pulumi.get(__ret__, 'id'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
METHOD_NAME=pulumi.get(__ret__, 'tags'),
type=pulumi.get(__ret__, 'type'))
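# Hypothetical usage sketch (names are placeholders, not real resources):
#   session = get_session(integration_account_name="my-account",
#                         resource_group_name="my-rg",
#                         session_name="my-session")
#   pulumi.export("session_content", session.content)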
@_utilities.lift_output_func(get_session)
def get_session_output(integration_account_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
session_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSessionResult]:
"""
Gets an integration account session.
:param str integration_account_name: The integration account name.
:param str resource_group_name: The resource group name.
:param str session_name: The integration account session name.
"""
... |
51 | print reaction math | #!/usr/bin/env python3
##
## @file printMath.py
## @brief Prints Rule, Reaction, and Event formulas in a given SBML Document
## @author Ben Bornstein
## @author Sarah Keating
##
##
## <!--------------------------------------------------------------------------
## This sample program is distributed under a different license than the rest
## of libSBML. This program uses the open-source MIT license, as follows:
##
## Copyright (c) 2013-2018 by the California Institute of Technology
## (California, USA), the European Bioinformatics Institute (EMBL-EBI, UK)
## and the University of Heidelberg (Germany), with support from the National
## Institutes of Health (USA) under grant R01GM070923. All rights reserved.
##
## Permission is hereby granted, free of charge, to any person obtaining a
## copy of this software and associated documentation files (the "Software"),
## to deal in the Software without restriction, including without limitation
## the rights to use, copy, modify, merge, publish, distribute, sublicense,
## and/or sell copies of the Software, and to permit persons to whom the
## Software is furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in
## all copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
## THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
## FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
## DEALINGS IN THE SOFTWARE.
##
## Neither the name of the California Institute of Technology (Caltech), nor
## of the European Bioinformatics Institute (EMBL-EBI), nor of the University
## of Heidelberg, nor the names of any contributors, may be used to endorse
## or promote products derived from this software without specific prior
## written permission.
## ------------------------------------------------------------------------ -->
##
import sys
import os.path
from libsbml import *
def printFunctionDefinition(n, fd):
if fd.isSetMath():
print("FunctionDefinition " + str(n) + ", " + fd.getId())
math = fd.getMath()
# Print function arguments.
if math.getNumChildren() > 1:
print("(" + (math.getLeftChild()).getName())
for n in range (1, math.getNumChildren()):
print(", " + (math.getChild(n)).getName())
print(") := ")
# Print function body.
if math.getNumChildren() == 0:
print("(no body defined)")
else:
math = math.getChild(math.getNumChildren() - 1)
formula = formulaToString(math)
print(formula + "\n")
def printRuleMath(n, r):
if r.isSetMath():
formula = formulaToString(r.getMath())
if len(r.getVariable()) > 0:
print("Rule " + str(n) + ", formula: "
+ r.getVariable() + " = " + formula + "\n")
else:
print("Rule " + str(n) + ", formula: "
+ formula + " = 0" + "\n")
def METHOD_NAME(n, r):
if r.isSetKineticLaw():
kl = r.getKineticLaw()
if kl.isSetMath():
formula = formulaToString(kl.getMath())
print("Reaction " + str(n) + ", formula: " + formula + "\n")
def printEventAssignmentMath(n, ea):
if ea.isSetMath():
variable = ea.getVariable()
formula = formulaToString(ea.getMath())
print(" EventAssignment " + str(n)
+ ", trigger: " + variable + " = " + formula + "\n")
def printEventMath(n, e):
if e.isSetDelay():
formula = formulaToString(e.getDelay().getMath())
print("Event " + str(n) + " delay: " + formula + "\n")
if e.isSetTrigger():
formula = formulaToString(e.getTrigger().getMath())
print("Event " + str(n) + " trigger: " + formula + "\n")
for i in range(0,e.getNumEventAssignments()):
printEventAssignmentMath(i + 1, e.getEventAssignment(i))
print()
def printMath(m):
for n in range(0,m.getNumFunctionDefinitions()):
printFunctionDefinition(n + 1, m.getFunctionDefinition(n))
for n in range(0,m.getNumRules()):
printRuleMath(n + 1, m.getRule(n))
print()
for n in range(0, m.getNumReactions()):
METHOD_NAME(n + 1, m.getReaction(n))
print()
for n in range(0,m.getNumEvents()):
printEventMath(n + 1, m.getEvent(n))
def main (args):
"""Usage: printMath filename
"""
if len(args) != 2:
print("\n" + "Usage: printMath filename" + "\n" + "\n")
return 1
filename = args[1]
document = readSBML(filename)
if document.getNumErrors() > 0:
print("Encountered the following SBML errors:" + "\n")
document.printErrors()
return 1
model = document.getModel()
if model is None:
print("No model present." + "\n")
return 1
printMath(model)
print()
return 0
if __name__ == '__main__':
main(sys.argv) |
52 | write vtk cell types | #! /usr/bin/env python
import os
import pathlib
def _write_vtk_header(file_like):
"""Write the file header."""
file_like.write("# vtk DataFile Version 2.0\n")
file_like.write("Landlab output\n")
file_like.write("ASCII\n")
file_like.write("DATASET UNSTRUCTURED_GRID\n\n")
def _write_vtk_points(grid, file_like, z_at_node):
"""Write the POINTS section of the file(-like)"""
x = grid.x_of_node
y = grid.y_of_node
z = z_at_node
file_like.write("POINTS " + str(grid.number_of_nodes) + " float\n")
for i in range(grid.number_of_nodes):
file_like.write(str(x[i]) + " " + str(y[i]) + " " + str(z[i]) + "\n")
file_like.write("\n")
def _write_vtk_patches(grid, file_like):
"""Write the CELLS section (in a Landlab grid these are patches)"""
num_patches = grid.number_of_patches
nodes_per_patch = len(grid.nodes_at_patch[0])
file_like.write(
"CELLS "
+ str(num_patches)
+ " "
+ str((nodes_per_patch + 1) * num_patches)
+ "\n"
)
for i in range(grid.number_of_patches):
file_like.write(str(nodes_per_patch))
for j in range(nodes_per_patch):
file_like.write(" " + str(grid.nodes_at_patch[i, j]))
file_like.write("\n")
file_like.write("\n")
def METHOD_NAME(grid, file_like):
"""Write the CELL_TYPES section (triangles or quads)"""
file_like.write("CELL_TYPES " + str(grid.number_of_patches) + "\n")
if len(grid.nodes_at_patch[0]) == 3: # triangles
cell_type = "5\n" # vtk code for a triangle
else:
cell_type = "9\n" # vtk code for a quad
for _ in range(grid.number_of_patches):
file_like.write(cell_type)
file_like.write("\n")
def _write_scalar_data(grid, file_like, field):
"""Write the SCALARS section for a given field"""
file_like.write("SCALARS " + field + " float 1\n")
file_like.write("LOOKUP_TABLE default\n")
for i in range(grid.number_of_nodes):
file_like.write(str(grid.at_node[field][i]))
file_like.write("\n")
def _write_vtk_point_data(grid, file_like, fields):
"""Write the POINT_DATA section, which in turn writes a SCALARS
section for each field in `fields`"""
file_like.write("POINT_DATA " + str(grid.number_of_nodes) + "\n")
for fieldname in fields:
_write_scalar_data(grid, file_like, fieldname)
file_like.write("\n")
def write_legacy_vtk(
path, grid, z_at_node="topographic__elevation", fields=None, clobber=False
):
"""
Write grid and field to a legacy VTK format file or file-like object.
Parameters
----------
path : file-like
Path to file or a file-like object
grid : Landlab grid object
The grid for which to output data
z_at_node : str or (n_nodes, ) ndarray
Field name or array of values to use for z coordinate
fields : list of str (optional)
List of node fields to output; default is all node fields
clobber : bool (optional)
Ok to overwrite existing file (default False)
Examples
--------
>>> import io
>>> import numpy as np
>>> from landlab import HexModelGrid
>>> from landlab.io.legacy_vtk import write_legacy_vtk
>>> grid = HexModelGrid((3, 2))
>>> topo = grid.add_zeros("topographic__elevation", at="node")
>>> topo[:] = np.arange(len(topo))
>>> water = grid.add_zeros("surface_water__depth", at="node")
>>> water[:] = 0.1 * (7.0 - topo)
>>> vtk_file = write_legacy_vtk(io.StringIO(), grid)
>>> lines = vtk_file.getvalue().splitlines()
>>> print(lines[0])
# vtk DataFile Version 2.0
>>> for i in range(5, 13):
... print(lines[i])
POINTS 7 float
0.5 0.0 0.0
1.5 0.0 1.0
0.0 0.866025 2.0
1.0 0.866025 3.0
2.0 0.866025 4.0
0.5 1.732051 5.0
1.5 1.732051 6.0
>>> for i in range(14, 21):
... print(lines[i])
CELLS 6 24
3 3 0 1
3 3 2 0
3 4 3 1
3 5 2 3
3 6 3 4
3 6 5 3
>>> for i in range(22, 29):
... print(lines[i])
CELL_TYPES 6
5
5
5
5
5
5
>>> for i in range(30, 49):
... print(lines[i])
POINT_DATA 7
SCALARS topographic__elevation float 1
LOOKUP_TABLE default
0.0
1.0
2.0
3.0
4.0
5.0
6.0
SCALARS surface_water__depth float 1
LOOKUP_TABLE default
0.7
0.6
0.5
0.4
0.3
0.2
0.1
"""
if isinstance(z_at_node, str):
z_at_node = grid.at_node[z_at_node]
if fields is None:
fields = grid.at_node.keys()
if isinstance(path, (str, pathlib.Path)):
if os.path.exists(path) and not clobber:
raise ValueError(f"file exists ({path})")
with open(path, "w") as fp:
_write_legacy_vtk_to_filelike(fp, grid, z_at_node, fields)
else:
_write_legacy_vtk_to_filelike(path, grid, z_at_node, fields)
return path
def _write_legacy_vtk_to_filelike(file_like, grid, z_at_node, fields):
"""Write output to specified file_like"""
_write_vtk_header(file_like)
_write_vtk_points(grid, file_like, z_at_node)
_write_vtk_patches(grid, file_like)
METHOD_NAME(grid, file_like)
_write_vtk_point_data(grid, file_like, fields) |
53 | get cisco snmp obj | #!/usr/bin/env python
#############################################################################
# Copyright (c) 2020 One Identity
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as published
# by the Free Software Foundation, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
# As an additional exemption you are allowed to compile & link against the
# OpenSSL libraries as published by the OpenSSL project. See the file
# COPYING for details.
#
#############################################################################
import os
import re
from pathlib import Path
import pytest
from psutil import TimeoutExpired
from src.common.blocking import wait_until_true
from src.common.file import File
from src.executors.process_executor import ProcessExecutor
class SNMPtrapd(object):
TRAP_LOG_PREFIX = 'LIGHT_TEST_SNMP_TRAP_RECEIVED:'
def __init__(self, port):
self.snmptrapd_proc = None
self.port = port
self.snmptrapd_log = Path("snmptrapd_log")
self.snmptrapd_stdout_path = Path("snmptrapd_stdout")
self.snmptrapd_stderr_path = Path("snmptrapd_stderr")
def wait_for_snmptrapd_log_creation(self):
return self.snmptrapd_log.exists()
def wait_for_snmptrapd_startup(self):
return "NET-SNMP version" in self.snmptrapd_log.read_text()
def start(self):
if self.snmptrapd_proc is not None:
return
self.snmptrapd_proc = ProcessExecutor().start(
[
"snmptrapd", "-f",
"--disableAuthorization=yes",
"-C",
"-m ALL",
"-A",
"-Ddump",
"-On",
"--doNotLogTraps=no",
"--authCommunity=log public",
self.port,
"-d",
"-Lf", os.path.relpath(str(self.snmptrapd_log)),
"-F", "{}%v\n".format(self.TRAP_LOG_PREFIX),
],
self.snmptrapd_stdout_path,
self.snmptrapd_stderr_path,
)
wait_until_true(self.wait_for_snmptrapd_log_creation)
wait_until_true(self.wait_for_snmptrapd_startup)
return self.snmptrapd_proc.is_running()
def stop(self):
if self.snmptrapd_proc is None:
return
self.snmptrapd_proc.terminate()
try:
self.snmptrapd_proc.wait(4)
except TimeoutExpired:
self.snmptrapd_proc.kill()
self.snmptrapd_proc = None
def get_port(self):
return self.port
def get_traps(self, counter):
trap_list = []
f = File(self.snmptrapd_log)
f.open("r")
while True:
trap_line = f.wait_for_lines([self.TRAP_LOG_PREFIX])[0]
res = re.match('({})(.*)'.format(self.TRAP_LOG_PREFIX), trap_line)
if (res):
trap_list.extend(res.group(2).rstrip().split("\t"))
if len(trap_list) == counter:
break
f.close()
return sorted(trap_list)
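# Each matching log line (illustrative) looks like
#   LIGHT_TEST_SNMP_TRAP_RECEIVED:<oid> = <value>\t<oid> = <value>...
# i.e. the prefix followed by tab-separated varbinds, which is why the
# payload is split on "\t" above.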
def get_log(self):
f = File(self.snmptrapd_log)
f.open("r")
log = f.read()
f.close()
return log
@pytest.fixture
def snmptrapd(port_allocator):
server = SNMPtrapd(port_allocator())
server.start()
yield server
server.stop()
class SNMPTestParams(object):
def __init__(self):
pass
def get_ip_address(self):
return '"127.0.0.1"'
def get_default_community(self):
return 'public'
def get_basic_snmp_obj(self):
return '".1.3.6.1.4.1.18372.3.1.1.1.1.1.0", "Octetstring", "admin"'
def get_basic_trap_obj(self):
return '".1.3.6.1.6.3.1.1.4.1.0", "Objectid", ".1.3.6.1.4.1.18372.3.1.1.1.2.1"'
def get_cisco_trap_obj(self):
return '".1.3.6.1.6.3.1.1.4.1.0","Objectid",".1.3.6.1.4.1.9.9.41.2.0.1"'
def METHOD_NAME(self):
cisco_snmp_obj = (
'"1.3.6.1.4.1.9.9.41.1.2.3.1.2.55", "Octetstring", "SYS"',
'"1.3.6.1.4.1.9.9.41.1.2.3.1.3.55", "Integer", "6"',
'"1.3.6.1.4.1.9.9.41.1.2.3.1.4.55", "Octetstring", "CONFIG_I"',
'"1.3.6.1.4.1.9.9.41.1.2.3.1.5.55", "Octetstring", "Configured from console by vty1 (10.30.0.32)"',
'"1.3.6.1.4.1.9.9.41.1.2.3.1.6.55", "Timeticks", "97881"',
)
return cisco_snmp_obj
def get_expected_cisco_trap(self):
return sorted([
'.1.3.6.1.4.1.9.9.41.1.2.3.1.2.55 = STRING: "SYS"',
'.1.3.6.1.4.1.9.9.41.1.2.3.1.3.55 = INTEGER: 6',
'.1.3.6.1.4.1.9.9.41.1.2.3.1.4.55 = STRING: "CONFIG_I"',
'.1.3.6.1.4.1.9.9.41.1.2.3.1.5.55 = STRING: "Configured from console by vty1 (10.30.0.32)"',
'.1.3.6.1.4.1.9.9.41.1.2.3.1.6.55 = Timeticks: (97881) 0:16:18.81',
'.1.3.6.1.6.3.1.1.4.1.0 = OID: .1.3.6.1.4.1.18372.3.1.1.1.2.1',
])
def get_expected_basic_trap(self):
return sorted([
'.1.3.6.1.4.1.18372.3.1.1.1.1.1.0 = STRING: "admin"',
'.1.3.6.1.6.3.1.1.4.1.0 = OID: .1.3.6.1.4.1.18372.3.1.1.1.2.1',
])
def get_expected_empty_trap(self):
return [
'.1.3.6.1.6.3.1.1.4.1.0 = OID: .1.3.6.1.4.1.18372.3.1.1.1.2.1',
]
@pytest.fixture
def snmp_test_params():
return SNMPTestParams() |
54 | prepare lib | """
mbed SDK
Copyright (c) 2014-2017 ARM Limited
Copyright (c) 2018 ON Semiconductor
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import copy
import stat
import os
from os.path import splitext, basename, dirname, abspath, isdir
from os import remove, mkdir
from shutil import rmtree, copyfile
from tools.targets import TARGET_MAP
from tools.export.exporters import Exporter
from tools.export.makefile import GccArm
class CodeBlocks(GccArm):
NAME = 'Code::Blocks'
DOT_IN_RELATIVE_PATH = True
MBED_CONFIG_HEADER_SUPPORTED = True
PREPROCESS_ASM = False
POST_BINARY_WHITELIST = set([
"PSOC6Code.complete"
])
@staticmethod
def filter_dot(str_in):
"""
Remove the './' prefix, if present.
This function assumes that resources.win_to_unix()
replaced all windows backslashes with slashes.
"""
if str_in is None:
return None
if str_in[:2] == './':
return str_in[2:]
return str_in
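# e.g. (illustrative): filter_dot("./src/main.cpp") -> "src/main.cpp",
# filter_dot("src/main.cpp") -> "src/main.cpp", filter_dot(None) -> None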
@staticmethod
def METHOD_NAME(libname):
if "lib" == libname[:3]:
libname = libname[3:-2]
return "-l" + libname
@staticmethod
def prepare_sys_lib(libname):
return "-l" + libname
def generate(self):
self.resources.win_to_unix()
comp_flags = []
debug_flags = []
release_flags = [ '-Os', '-g1' ]
next_is_include = False
for f in self.flags['c_flags'] + self.flags['cxx_flags'] + self.flags['common_flags']:
f = f.strip()
if f == "-include":
next_is_include = True
continue
if f == '-c':
continue
if next_is_include:
f = '-include ' + f
next_is_include = False
if f.startswith('-O') or f.startswith('-g'):
debug_flags.append(f)
else:
comp_flags.append(f)
comp_flags = sorted(list(set(comp_flags)))
inc_dirs = [self.filter_dot(s) for s in self.resources.inc_dirs];
inc_dirs = [x for x in inc_dirs if (x is not None and
x != '' and x != '.' and
not x.startswith('bin') and
not x.startswith('obj'))];
c_sources = sorted([self.filter_dot(s) for s in self.resources.c_sources])
libraries = [self.METHOD_NAME(basename(lib)) for lib in self.libraries]
sys_libs = [self.prepare_sys_lib(lib) for lib
in self.toolchain.sys_libs]
ncs36510fib = (hasattr(self.toolchain.target, 'post_binary_hook') and
self.toolchain.target.post_binary_hook['function'] == 'NCS36510TargetCode.ncs36510_addfib')
if ncs36510fib:
c_sources.append('ncs36510fib.c')
c_sources.append('ncs36510trim.c')
ctx = {
'project_name': self.project_name,
'debug_flags': debug_flags,
'release_flags': release_flags,
'comp_flags': comp_flags,
'ld_flags': self.flags['ld_flags'],
'headers': sorted(list(set([self.filter_dot(s) for s in self.resources.headers]))),
'c_sources': c_sources,
's_sources': sorted([self.filter_dot(s) for s in self.resources.s_sources]),
'cpp_sources': sorted([self.filter_dot(s) for s in self.resources.cpp_sources]),
'include_paths': inc_dirs,
'linker_script': self.filter_dot(self.resources.linker_script),
'libraries': libraries,
'sys_libs': sys_libs,
'ncs36510addfib': ncs36510fib,
'openocdboard': ''
}
openocd_board = {
'NCS36510': 'board/ncs36510_axdbg.cfg',
'DISCO_F429ZI': 'board/stm32f429discovery.cfg',
'DISCO_F469NI': 'board/stm32f469discovery.cfg',
'DISCO_L072CZ_LRWAN1': 'board/stm32l0discovery.cfg',
'DISCO_F769NI': 'board/stm32f7discovery.cfg',
'DISCO_L475VG_IOT01A': 'board/stm32l4discovery.cfg',
'DISCO_L476VG': 'board/stm32l4discovery.cfg',
'CY8CKIT_062_WIFI_BT': 'board/cy8ckit_062_ble.cfg'
}
if self.target in openocd_board:
ctx['openocdboard'] = openocd_board[self.target]
self.gen_file('codeblocks/cbp.tmpl', ctx, "%s.%s" % (self.project_name, 'cbp'))
for f in [ 'obj', 'bin' ]:
if not isdir(f):
mkdir(f)
self.gen_file_nonoverwrite('codeblocks/mbedignore.tmpl',
ctx, f + '/.mbedignore')
if ncs36510fib:
genaddfiles = [ 'ncs36510fib.c', 'ncs36510trim.c' ]
for f in genaddfiles:
copyfile(os.path.join(dirname(abspath(__file__)), f),
self.gen_file_dest(f))
ignorefiles = genaddfiles
try:
with open(self.gen_file_dest('.mbedignore'), 'r') as f:
l = set(map(lambda x: x.strip(), f.readlines()))
ignorefiles = [x for x in genaddfiles if x not in l]
except IOError as e:
pass
except:
raise
if ignorefiles:
with open(self.gen_file_dest('.mbedignore'), 'a') as f:
for fi in ignorefiles:
f.write("%s\n" % fi)
# finally, generate the project file
super(CodeBlocks, self).generate()
@staticmethod
def clean(project_name):
for ext in ['cbp', 'depend', 'layout']:
remove("%s.%s" % (project_name, ext))
for f in ['openocd.log', 'ncs36510fib.c', 'ncs36510trim.c']:
remove(f)
for d in ['bin', 'obj']:
rmtree(d, ignore_errors=True) |
55 | update local dataset | import logging
import copy
import torch
from torch import nn
from .fednova import FedNova
class Client:
def __init__(
self,
client_idx,
local_training_data,
local_test_data,
local_sample_number,
args,
device,
):
self.client_idx = client_idx
self.local_training_data = local_training_data
self.local_test_data = local_test_data
self.local_sample_number = local_sample_number
logging.info("self.local_sample_number = " + str(self.local_sample_number))
self.args = args
self.device = device
"""
stackoverflow_lr is the task of multi-label classification
please refer to the following links for detailed explanations on cross-entropy and the corresponding implementation of tff research:
https://towardsdatascience.com/cross-entropy-for-classification-d98e7f974451
https://github.com/google-research/federated/blob/49a43456aa5eaee3e1749855eed89c0087983541/optimization/stackoverflow_lr/federated_stackoverflow_lr.py#L131
"""
if self.args.dataset == "stackoverflow_lr":
self.criterion = nn.BCELoss(reduction="sum").to(device)
else:
self.criterion = nn.CrossEntropyLoss().to(device)
def METHOD_NAME(
self, client_idx, local_training_data, local_test_data, local_sample_number
):
self.client_idx = client_idx
self.local_training_data = local_training_data
self.local_test_data = local_test_data
self.local_sample_number = local_sample_number
def get_sample_number(self):
return self.local_sample_number
def get_local_norm_grad(self, opt, cur_params, init_params, weight=0):
if weight == 0:
weight = opt.ratio
grad_dict = {}
for k in cur_params.keys():
scale = 1.0 / opt.local_normalizing_vec
cum_grad = init_params[k] - cur_params[k]
cum_grad.mul_(weight * scale)
grad_dict[k] = cum_grad
return grad_dict
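# In FedNova terms (descriptive only): each returned entry is
#   weight * (w_init - w_cur) / local_normalizing_vec
# i.e. the client's cumulative update scaled by its data ratio (the default
# weight) and by the optimizer's normalized local step count.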
def get_local_tau_eff(self, opt):
if opt.mu != 0:
return opt.local_steps * opt.ratio
else:
return opt.local_normalizing_vec * opt.ratio
def reset_fednova_optimizer(self, opt):
opt.local_counter = 0
opt.local_normalizing_vec = 0
opt.local_steps = 0
for group in opt.param_groups:
for p in group["params"]:
param_state = opt.state[p]
param_state["cum_grad"].zero_()
# Reinitialize momentum buffer
if "momentum_buffer" in param_state:
param_state["momentum_buffer"].zero_()
def train(self, net, ratio):
net.train()
# train and update
init_params = copy.deepcopy(net.state_dict())
optimizer = FedNova(
net.parameters(),
lr=self.args.learning_rate,
gmf=self.args.gmf,
mu=self.args.mu,
ratio=ratio,
momentum=self.args.momentum,
dampening=self.args.dampening,
weight_decay=self.args.wd,
nesterov=self.args.nesterov,
)
epoch_loss = []
for epoch in range(self.args.epochs):
batch_loss = []
for batch_idx, (x, labels) in enumerate(self.local_training_data):
x, labels = x.to(self.device), labels.to(self.device)
net = net.to(self.device)
net.zero_grad()
log_probs = net(x)
loss = self.criterion(log_probs, labels) # pylint: disable=E1102
loss.backward()
# to avoid nan loss
# torch.nn.utils.clip_grad_norm_(net.parameters(), 0.5)
optimizer.step()
# logging.info('Update Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
# epoch, (batch_idx + 1) * self.args.batch_size, len(self.local_training_data) * self.args.batch_size,
# 100. * (batch_idx + 1) / len(self.local_training_data), loss.item()))
batch_loss.append(loss.item())
epoch_loss.append(sum(batch_loss) / len(batch_loss))
# logging.info('Client Index = {}\tEpoch: {}\tLoss: {:.6f}'.format(
# self.client_idx, epoch, sum(epoch_loss) / len(epoch_loss)))
norm_grad = self.get_local_norm_grad(optimizer, net.state_dict(), init_params)
tau_eff = self.get_local_tau_eff(optimizer)
# self.reset_fednova_optimizer(optimizer)
return sum(epoch_loss) / len(epoch_loss), norm_grad, tau_eff
def local_test(self, model_global, b_use_test_dataset=False):
model_global.eval()
model_global.to(self.device)
metrics = {
"test_correct": 0,
"test_loss": 0,
"test_precision": 0,
"test_recall": 0,
"test_total": 0,
}
if b_use_test_dataset:
test_data = self.local_test_data
else:
test_data = self.local_training_data
with torch.no_grad():
for batch_idx, (x, target) in enumerate(test_data):
x = x.to(self.device)
target = target.to(self.device)
pred = model_global(x)
loss = self.criterion(pred, target) # pylint: disable=E1102
if self.args.dataset == "stackoverflow_lr":
predicted = (pred > 0.5).int()
correct = predicted.eq(target).sum(axis=-1).eq(target.size(1)).sum()
true_positive = ((target * predicted) > 0.1).int().sum(axis=-1)
precision = true_positive / (predicted.sum(axis=-1) + 1e-13)
recall = true_positive / (target.sum(axis=-1) + 1e-13)
metrics["test_precision"] += precision.sum().item()
metrics["test_recall"] += recall.sum().item()
else:
_, predicted = torch.max(pred, -1)
correct = predicted.eq(target).sum()
metrics["test_correct"] += correct.item()
metrics["test_loss"] += loss.item() * target.size(0)
metrics["test_total"] += target.size(0)
return metrics |
56 | proxy call | import json
from collections import defaultdict
from dataclasses import dataclass
from threading import Lock, get_ident
from types import FunctionType, TracebackType
from typing import Any, Dict, List, Optional, Tuple, Union
from lazy_object_proxy import Proxy
from wrapt import ObjectProxy
from brownie._config import BROWNIE_FOLDER, CONFIG
from brownie.exceptions import ContractNotFound
from brownie.network import accounts, web3
from brownie.network.contract import Contract, ContractCall
from brownie.project import compile_source
DATA_DIR = BROWNIE_FOLDER.joinpath("data")
MULTICALL2_ABI = json.loads(DATA_DIR.joinpath("interfaces", "Multicall2.json").read_text())
MULTICALL2_SOURCE = DATA_DIR.joinpath("contracts", "Multicall2.sol").read_text()
@dataclass
class Call:
calldata: Tuple[str, bytes]
decoder: FunctionType
class Result(ObjectProxy):
"""A proxy object to be updated with the result of a multicall."""
def __repr__(self) -> str:
return repr(self.__wrapped__)
class LazyResult(Proxy):
"""A proxy object to be updated with the result of a multicall."""
def __repr__(self) -> str:
return repr(self.__wrapped__)
class Multicall:
"""Context manager for batching multiple calls to constant contract functions."""
_lock = Lock()
def __init__(self) -> None:
self.address = None
self._block_number = defaultdict(lambda: None) # type: ignore
self._contract = None
self._pending_calls: Dict[int, List[Call]] = defaultdict(list)
setattr(ContractCall, "__original_call_code", ContractCall.__call__.__code__)
setattr(ContractCall, "__proxy_call_code", self.METHOD_NAME.__code__)
setattr(ContractCall, "__multicall", defaultdict(lambda: None))
ContractCall.__call__.__code__ = self.METHOD_NAME.__code__
@property
def block_number(self) -> int:
return self._block_number[get_ident()]
def __call__(
self, address: Optional[str] = None, block_identifier: Union[str, bytes, int, None] = None
) -> "Multicall":
self.address = address # type: ignore
self._block_number[get_ident()] = block_identifier # type: ignore
return self
def _flush(self, future_result: Result = None) -> Any:
pending_calls = self._pending_calls[get_ident()]
self._pending_calls[get_ident()] = []
if not pending_calls:
# either all calls have already been made
# or this result has already been retrieved
return future_result
with self._lock:
ContractCall.__call__.__code__ = getattr(ContractCall, "__original_call_code")
results = self._contract.tryAggregate( # type: ignore
False,
[_call.calldata for _call in pending_calls],
block_identifier=self._block_number[get_ident()],
)
ContractCall.__call__.__code__ = getattr(ContractCall, "__proxy_call_code")
for _call, result in zip(pending_calls, results):
_call.__wrapped__ = _call.decoder(result[1]) if result[0] else None # type: ignore
return future_result
def flush(self) -> Any:
"""Flush the pending queue of calls, retrieving all the results."""
return self._flush()
def _call_contract(self, call: ContractCall, *args: Tuple, **kwargs: Dict[str, Any]) -> Proxy:
"""Add a call to the buffer of calls to be made"""
calldata = (call._address, call.encode_input(*args, **kwargs)) # type: ignore
call_obj = Call(calldata, call.decode_output) # type: ignore
# future result
result = Result(call_obj)
self._pending_calls[get_ident()].append(result)
return LazyResult(lambda: self._flush(result))
@staticmethod
def METHOD_NAME(*args: Tuple, **kwargs: Dict[str, Any]) -> Any:
"""Proxy code which substitutes `ContractCall.__call__"""
self = getattr(ContractCall, "__multicall", {}).get(get_ident())
if self:
return self._call_contract(*args, **kwargs)
# standard call we let pass through
ContractCall.__call__.__code__ = getattr(ContractCall, "__original_call_code")
result = ContractCall.__call__(*args, **kwargs) # type: ignore
ContractCall.__call__.__code__ = getattr(ContractCall, "__proxy_call_code")
return result
def __enter__(self) -> "Multicall":
"""Enter the Context Manager and substitute `ContractCall.__call__`"""
        # the code objects were stashed on ContractCall in __init__; here we resolve
        # the Multicall2 address and register this thread's instance
active_network = CONFIG.active_network
if "multicall2" in active_network:
self.address = active_network["multicall2"]
elif "cmd" in active_network:
deployment = self.deploy({"from": accounts[0]})
self.address = deployment.address # type: ignore
self._block_number[get_ident()] = deployment.tx.block_number # type: ignore
self._block_number[get_ident()] = (
self._block_number[get_ident()] or web3.eth.get_block_number()
)
if self.address is None:
raise ContractNotFound(
"Must set Multicall address via `brownie.multicall(address=...)`"
)
elif not web3.eth.get_code(self.address, block_identifier=self.block_number):
raise ContractNotFound(
f"Multicall at address {self.address} does not exist at block {self.block_number}"
)
self._contract = Contract.from_abi("Multicall", self.address, MULTICALL2_ABI)
        getattr(ContractCall, "__multicall")[get_ident()] = self
        return self
def __exit__(self, exc_type: Exception, exc_val: Any, exc_tb: TracebackType) -> None:
"""Exit the Context Manager and reattach original `ContractCall.__call__` code"""
self.flush()
getattr(ContractCall, "__multicall")[get_ident()] = None
@staticmethod
def deploy(tx_params: Dict) -> Contract:
"""Deploy an instance of the `Multicall2` contract.
Args:
tx_params: parameters passed to the `deploy` method of the `Multicall2` contract
container.
"""
project = compile_source(MULTICALL2_SOURCE)
deployment = project.Multicall2.deploy(tx_params) # type: ignore
CONFIG.active_network["multicall2"] = deployment.address
return deployment |
57 | create usgs | import os
import re
from typing import Optional, Union
from pathlib import Path
from flood_forecast.preprocessing.closest_station import (
get_weather_data,
process_asos_data,
)
from flood_forecast.preprocessing.process_usgs import (
make_usgs_data,
process_intermediate_csv,
)
from flood_forecast.gcp_integration.basic_utils import (
get_storage_client,
upload_file,
download_file,
)
from flood_forecast.preprocessing.eco_gage_set import eco_gage_set
import json
from datetime import datetime
import pytz
import pandas as pd
def build_weather_csv(
json_full_path,
asos_base_url,
base_url_2,
econet_data,
visited_gages_path,
start=0,
end_index=100,
):
directory = os.fsencode(json_full_path)
sorted_list = sorted(os.listdir(directory))
for i in range(start, end_index):
file = sorted_list[i]
filename = os.fsdecode(file)
get_weather_data(
os.path.join(json_full_path, filename),
econet_data,
asos_base_url,
visited_gages_path,
)
process_asos_data(
os.path.join(json_full_path, filename),
base_url_2,
visited_gages_path,
)
# todo fix this function so it does more than open files
# def make_usgs(meta_data_path: str, start, end_index: int):
# meta_directory = os.fsencode(meta_data_path)
# sorted_list = sorted(os.listdir(meta_directory))
# for i in range(start, end_index):
# with open(sorted_list[i]) as d:
# data = json.loads(d)
# # make_usgs_data(datetime(2014, 1, 1), datetime(2019,1,1), data["gage_id"])
def join_data(weather_csv, meta_json_file, flow_csv):
pass
def create_visited():
visited_gages = {"stations_visited": {}, "saved_complete": {}}
with open("visited_gages.json", "w+") as f:
json.dump(visited_gages, f)
def get_eco_netset(directory_path: str) -> set:
"""
Econet data was supplied to us by the NC State climate office. They gave
    us a directory of CSV files in the following format: `LastName_First_station_id_Hourly.txt`.
This code simply constructs a set of stations based on what is in the folder.
"""
directory = os.fsencode(directory_path)
print(sorted(os.listdir(directory)))
for file in sorted(os.listdir(directory)):
filename = os.fsdecode(file)
try:
eco_gage_set.add(filename.split("c_")[1].split("_H")[0])
except BaseException:
print(filename)
return eco_gage_set
def combine_data(flow_df: pd.DataFrame, precip_df: pd.DataFrame):
tz = pytz.timezone("UTC")
precip_df["hour_updated"] = precip_df["hour_updated"].map(
lambda x: datetime.strptime(x, "%Y-%m-%d %H:%M:%S")
)
precip_df["hour_updated"] = precip_df["hour_updated"].map(
lambda x: tz.localize(x)
)
joined_df = precip_df.merge(
flow_df, left_on="hour_updated", right_on="datetime", how="outer"
)[4:-4]
nan_precip = sum(pd.isnull(joined_df["p01m"]))
nan_flow = sum(pd.isnull(joined_df["cfs"]))
return joined_df, nan_flow, nan_precip
def METHOD_NAME(meta_data_dir: str, precip_path: str, start: int, end: int):
gage_list = sorted(os.listdir(meta_data_dir))
exceptions = {}
client = get_storage_client()
for i in range(start, end):
try:
file_name = gage_list[i]
gage_id = file_name.split("stations")[0]
with open(os.path.join(meta_data_dir, file_name)) as f:
print(os.path.join(meta_data_dir, file_name))
data = json.load(f)
if len(gage_id) == 7:
gage_id = "0" + gage_id
raw_df = make_usgs_data(
datetime(2014, 1, 1), datetime(2019, 1, 1), gage_id
)
else:
raw_df = make_usgs_data(
datetime(2014, 1, 1), datetime(2019, 1, 1), gage_id
)
df, max_flow, min_flow = process_intermediate_csv(raw_df)
data["time_zone_code"] = df["tz_cd"].iloc[0]
data["max_flow"] = max_flow
data["min_flow"] = min_flow
precip_df = pd.read_csv(
os.path.join(
precip_path, data["stations"][0]["station_id"] + ".csv"
)
)
fixed_df, nan_flow, nan_precip = combine_data(df, precip_df)
data["nan_flow"] = nan_flow
data["nan_precip"] = nan_precip
joined_name = (
str(gage_id) + data["stations"][0]["station_id"] + "_flow.csv"
)
joined_upload = "joined/" + joined_name
meta_path = os.path.join(meta_data_dir, file_name)
data["files"] = [joined_name]
fixed_df.to_csv(joined_name)
with open(meta_path, "w") as f:
json.dump(data, f)
upload_file("predict_cfs", "meta2/" + file_name, meta_path, client)
upload_file("predict_cfs", joined_upload, joined_name, client)
except Exception as e:
exceptions[str(gage_id)] = str(e)
with open("exceptions.json", "w+") as a:
json.dump(exceptions, a)
print("exception")
upload_file(
"predict_cfs",
"meta2/" + "exceptions.json",
"exceptions.json",
client,
)
def get_data(file_path: str, gcp_service_key: Optional[str] = None) -> Union[str, pd.DataFrame]:
"""Extract bucket name and storage object name from file_path
Args:
        file_path (str): a local path or ``gs://`` URI of the data file; a DataFrame may also be passed and is returned unchanged.
Example,
file_path = "gs://task_ts_data/2020-08-17/Afghanistan____.csv"
bucket_name = "task_ts_data"
object_name = "2020-08-17/Afghanistan____.csv"
        local_temp_filepath = "data/task_ts_data/2020-08-17/Afghanistan____.csv"
    Returns:
        Union[str, pd.DataFrame]: the loaded DataFrame for CSV files, otherwise the local file path
"""
if isinstance(file_path, pd.DataFrame):
return file_path
if file_path.startswith("gs://"):
# download data from gcs to local
print(file_path)
regex = r"(?<=gs:\/\/)[a-zA-Z0-9\-\_]*(?=\/)"
bucket_name = re.search(regex, file_path).group()
object_name = re.search(rf"(?<={bucket_name}\/).*", file_path).group()
local_temp_filepath = Path("data") / bucket_name / object_name
if not local_temp_filepath.parent.exists():
local_temp_filepath.parent.mkdir(parents=True, exist_ok=True)
download_file(
bucket_name=bucket_name,
source_blob_name=object_name,
destination_file_name=local_temp_filepath,
service_key_path=gcp_service_key,
)
if str(local_temp_filepath)[-3:] != "csv":
return local_temp_filepath
return pd.read_csv(str(local_temp_filepath))
elif str(file_path)[-3:] != "csv":
return file_path
return pd.read_csv(file_path) |
58 | test not populated move | import unittest
from zulip_bots.simple_lib import SimpleStorage
from ..libraries import database, game_data, interface, mechanics
class GridTest(unittest.TestCase):
def test_out_of_grid(self):
points = [[v, h] for h in range(7) for v in range(7)]
expected_outcomes = [
True,
False,
False,
True,
False,
False,
True,
False,
True,
False,
True,
False,
True,
False,
False,
False,
True,
True,
True,
False,
False,
True,
True,
True,
False,
True,
True,
True,
False,
False,
True,
True,
True,
False,
False,
False,
True,
False,
True,
False,
True,
False,
True,
False,
False,
True,
False,
False,
True,
]
test_outcomes = [mechanics.is_in_grid(point[0], point[1]) for point in points]
self.assertListEqual(test_outcomes, expected_outcomes)
def test_jump_and_grids(self):
points = [
[0, 0, 1, 1],
[1, 1, 2, 2],
[2, 2, 3, 3],
[0, 0, 0, 2],
[0, 0, 2, 2],
[6, 6, 5, 4],
]
expected_outcomes = [True, True, True, True, True, True]
test_outcomes = [
mechanics.is_jump(point[0], point[1], point[2], point[3]) for point in points
]
self.assertListEqual(test_outcomes, expected_outcomes)
def test_jump_special_cases(self):
points = [
[0, 0, 0, 3],
[0, 0, 3, 0],
[6, 0, 6, 3],
[4, 2, 6, 2],
[4, 3, 3, 4],
[4, 3, 2, 2],
[0, 0, 0, 6],
[0, 0, 1, 1],
[0, 0, 2, 2],
[3, 0, 3, 1],
[3, 0, 3, 2],
[3, 1, 3, 0],
[3, 1, 3, 2],
]
expected_outcomes = [
False,
False,
False,
True,
True,
True,
True,
True,
True,
False,
True,
False,
False,
]
test_outcomes = [
mechanics.is_jump(point[0], point[1], point[2], point[3]) for point in points
]
self.assertListEqual(test_outcomes, expected_outcomes)
def METHOD_NAME(self):
grid = interface.construct_grid("XXXNNNOOOXXXNNNOOOXXXNNN")
moves = [[0, 0, 1, 1], [0, 3, 1, 3], [5, 1, 5, 3], [0, 0, 0, 3], [0, 0, 3, 0]]
expected_outcomes = [True, True, False, False, False]
test_outcomes = [mechanics.is_empty(move[2], move[3], grid) for move in moves]
self.assertListEqual(test_outcomes, expected_outcomes)
def test_legal_move(self):
grid = interface.construct_grid("XXXNNNOOONNNNNNOOONNNNNN")
presets = [
[0, 0, 0, 3, "X", 1],
[0, 0, 0, 6, "X", 2],
[0, 0, 3, 6, "X", 3],
[0, 0, 2, 2, "X", 3],
]
expected_outcomes = [False, False, True, False]
test_outcomes = [
mechanics.is_legal_move(
preset[0], preset[1], preset[2], preset[3], preset[4], preset[5], grid
)
for preset in presets
]
self.assertListEqual(test_outcomes, expected_outcomes)
def test_legal_put(self):
grid = interface.construct_grid("XXXNNNOOOXXXNNNOOOXXXNNN")
presets = [[0, 0, 1], [0, 3, 2], [0, 6, 3], [1, 1, 2], [1, 3, 1], [1, 6, 1], [1, 5, 1]]
expected_outcomes = [False, False, False, False, True, False, True]
test_outcomes = [
mechanics.is_legal_put(preset[0], preset[1], grid, preset[2]) for preset in presets
]
self.assertListEqual(test_outcomes, expected_outcomes)
def test_legal_take(self):
grid = interface.construct_grid("XXXNNNOOOXXXNNNOOOXXXNNN")
presets = [
[0, 0, "X", 1],
[0, 1, "X", 1],
[0, 0, "O", 1],
[0, 0, "O", 0],
[0, 1, "O", 1],
[2, 2, "X", 1],
[2, 3, "X", 1],
[2, 4, "O", 1],
]
expected_outcomes = [False, False, True, False, False, True, True, False]
test_outcomes = [
mechanics.is_legal_take(preset[0], preset[1], preset[2], grid, preset[3])
for preset in presets
]
self.assertListEqual(test_outcomes, expected_outcomes)
def test_own_piece(self):
grid = interface.construct_grid("XXXNNNOOOXXXNNNOOOXXXNNN")
presets = [[0, 0, "X"], [0, 0, "O"], [0, 6, "X"], [0, 6, "O"], [1, 1, "X"], [1, 1, "O"]]
expected_outcomes = [True, False, True, False, False, False]
test_outcomes = [
mechanics.is_own_piece(preset[0], preset[1], preset[2], grid) for preset in presets
]
self.assertListEqual(test_outcomes, expected_outcomes)
def test_can_make_any_move(self):
grid = interface.construct_grid("NONNNNNNNNNNNNNNNNNNNNXN")
self.assertEqual(mechanics.check_moves("O", grid), True)
self.assertEqual(mechanics.check_moves("X", grid), True)
grid = interface.construct_grid("XXXXXXOXXXXXXXXXXXXXXXNX")
self.assertEqual(mechanics.check_moves("O", grid), False)
self.assertEqual(mechanics.check_moves("X", grid), True)
grid = interface.construct_grid("NXNNNNNNNNNNNNNNNNNNNNNN")
self.assertEqual(mechanics.check_moves("O", grid), False)
self.assertEqual(mechanics.check_moves("X", grid), True)
class HillsTest(unittest.TestCase):
def test_unchanged_hills(self):
grid = interface.construct_grid("XXXNNNOOOXXXXNNOOOXXXNNN")
hills_uid = "02356"
mechanics.move_man_legal(3, 4, 3, 5, grid)
updated_hills_uid = mechanics.get_hills_numbers(grid)
self.assertEqual(updated_hills_uid, hills_uid)
def test_no_diagonal_hills(self):
grid = interface.construct_grid("XXXNNXOONXXXXNNOOOXXXNNN")
hills_uid = "0356"
mechanics.move_man_legal(3, 4, 2, 4, grid)
updated_hills_uid = mechanics.get_hills_numbers(grid)
self.assertEqual(updated_hills_uid, hills_uid)
class PhaseTest(unittest.TestCase):
def test_new_game_phase(self):
storage = SimpleStorage()
topic_name = "test"
merels = database.MerelsStorage(topic_name, storage)
merels.update_game(topic_name, "X", 0, 0, "NNNNNNNNNNNNNNNNNNNNNNNN", "", 0)
res = game_data.GameData(merels.get_game_data("test"))
self.assertEqual(res.get_phase(), 1)
merels.update_game(res.topic_name, "O", 5, 4, "XXXXNNNOOOOONNNNNNNNNNNN", "03", 0)
res = game_data.GameData(merels.get_game_data("test"))
self.assertEqual(res.board, "XXXXNNNOOOOONNNNNNNNNNNN")
self.assertEqual(res.get_phase(), 2)
merels.update_game(res.topic_name, "X", 6, 4, "XXXNNNNOOOOONNNNNNNNNNNN", "03", 0)
res = game_data.GameData(merels.get_game_data("test"))
self.assertEqual(res.board, "XXXNNNNOOOOONNNNNNNNNNNN")
self.assertEqual(res.get_phase(), 3) |
59 | all chains | """iptables helper functions.
Unlike the `firewall` module, these functions know nothing about PaaSTA and
could effectively be a third-party library. They just make working with
iptables a little bit easier.
"""
import collections
import contextlib
import logging
import iptc
log = logging.getLogger(__name__)
RULE_TARGET_SORT_ORDER = {
# all else defaults to '0'
"LOG": 1,
"REJECT": 2.0,
}
_RuleBase = collections.namedtuple(
"_RuleBase", ("protocol", "src", "dst", "target", "matches", "target_parameters")
)
class Rule(_RuleBase):
"""Rule representation.
Working with iptc's rule classes directly doesn't work well, since rules
represent actual existing iptables rules, and changes are applied
immediately. They're also difficult to compare.
"""
def __new__(cls, *args, **kwargs):
result = _RuleBase.__new__(cls, *args, **kwargs)
result.validate()
return result
def _replace(self, **kwargs):
result = super()._replace(**kwargs)
result.validate()
return result
def validate(self):
if self.target == "REJECT":
assert any(
name == "reject-with" for name, _ in self.target_parameters
), "REJECT rules must specify reject-with"
assert tuple(sorted(self.matches)) == self.matches, "matches should be sorted"
for match_name, params in self.matches:
for param_name, param_value in params:
assert (
"_" not in param_name
), f"use dashes instead of underscores in {param_name}"
assert isinstance(
param_value, tuple
), f"value of {param_name} should be tuple"
assert (
tuple(sorted(self.target_parameters)) == self.target_parameters
), "target_parameters should be sorted"
for param_name, param_value in self.target_parameters:
assert (
"_" not in param_name
), f"use dashes instead of underscores in {param_name}"
assert isinstance(
param_value, tuple
), f"value of {param_name} should be tuple"
@classmethod
def from_iptc(cls, rule):
fields = {
"protocol": rule.protocol,
"src": rule.src,
"dst": rule.dst,
"target": rule.target.name,
"matches": (),
"target_parameters": (),
}
for param_name, param_value in sorted(rule.target.get_all_parameters().items()):
fields["target_parameters"] += ((param_name, tuple(param_value)),)
matches = []
for match in rule.matches:
matches.append(
(
match.name,
tuple(
(param, tuple(value))
for param, value in sorted(match.get_all_parameters().items())
),
)
)
# ensure that matches are sorted for consistency with matching
fields["matches"] = tuple(sorted(matches))
return cls(**fields)
def to_iptc(self):
rule = iptc.Rule()
rule.protocol = self.protocol
rule.src = self.src
rule.dst = self.dst
target = rule.create_target(self.target)
for param_name, param_value in self.target_parameters:
target.set_parameter(param_name, param_value)
for name, params in self.matches:
match = rule.create_match(name)
for param_name, param_value in params:
match.set_parameter(param_name, param_value)
return rule
@contextlib.contextmanager
def iptables_txn(table):
"""Temporarily disable autocommit and commit at the end.
If an exception occurs, changes are rolled back.
By default, changes to iptables rules are applied immediately. In some
cases, we want to avoid that.
https://github.com/ldx/python-iptables#autocommit
"""
assert table.autocommit is True, table.autocommit
try:
table.autocommit = False
yield
table.commit()
finally:
table.refresh()
table.autocommit = True
class ChainDoesNotExist(Exception):
pass
def METHOD_NAME():
return {chain.name for chain in iptc.Table(iptc.Table.FILTER).chains}
def ensure_chain(chain, rules):
"""Idempotently ensure a chain exists and has an exact set of rules.
This function creates or updates an existing chain to match the rules
passed in.
This function will not reorder existing rules, but any new rules are always
inserted at the front of the chain.
"""
try:
current_rules = set(list_chain(chain))
except ChainDoesNotExist:
create_chain(chain)
current_rules = set()
for rule in rules:
if rule not in current_rules:
insert_rule(chain, rule)
extra_rules = current_rules - set(rules)
if extra_rules:
delete_rules(chain, extra_rules)
def _rule_sort_key(rule_tuple):
old_index, rule = rule_tuple
target_name = rule.target
return (RULE_TARGET_SORT_ORDER.get(target_name, 0), old_index)
def reorder_chain(chain_name):
"""Ensure that any REJECT rules are last, and any LOG rules are second-to-last"""
table = iptc.Table(iptc.Table.FILTER)
with iptables_txn(table):
rules = list_chain(chain_name)
chain = iptc.Chain(table, chain_name)
# sort the rules by rule_key, which uses (RULE_TARGET_SORT_ORDER, idx)
sorted_rules_with_indices = sorted(enumerate(rules), key=_rule_sort_key)
for new_index, (old_index, rule) in enumerate(sorted_rules_with_indices):
if new_index == old_index:
continue
log.debug(f"reordering chain {chain_name} rule {rule} to #{new_index}")
chain.replace_rule(rule.to_iptc(), new_index)
def ensure_rule(chain, rule):
rules = list_chain(chain)
if rule not in rules:
insert_rule(chain, rule)
def insert_rule(chain_name, rule):
log.debug(f"adding rule to {chain_name}: {rule}")
chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), chain_name)
chain.insert_rule(rule.to_iptc())
def delete_rules(chain_name, rules):
log.debug(f"deleting rules from {chain_name}: {rules}")
table = iptc.Table(iptc.Table.FILTER)
with iptables_txn(table):
chain = iptc.Chain(table, chain_name)
for potential_rule in chain.rules:
if Rule.from_iptc(potential_rule) in rules:
chain.delete_rule(potential_rule)
def create_chain(chain_name):
log.debug(f"creating chain: {chain_name}")
iptc.Table(iptc.Table.FILTER).create_chain(chain_name)
def delete_chain(chain_name):
log.debug(f"deleting chain: {chain_name}")
chain = iptc.Chain(iptc.Table(iptc.Table.FILTER), chain_name)
chain.flush()
chain.delete()
def list_chain(chain_name):
"""List rules in a chain.
Returns a list of iptables rules, or raises ChainDoesNotExist.
"""
table = iptc.Table(iptc.Table.FILTER)
chain = iptc.Chain(table, chain_name)
# TODO: is there any way to do this without listing all chains? (probably slow)
# If the chain doesn't exist, chain.rules will be an empty list, so we need
# to make sure the chain actually _does_ exist.
if chain in table.chains:
return tuple(Rule.from_iptc(rule) for rule in chain.rules)
else:
raise ChainDoesNotExist(chain_name) |
60 | build arguments schema | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"mesh secret show",
is_preview=True,
)
class Show(AAZCommand):
"""Get the details of a secret.
"""
_aaz_info = {
"version": "2018-09-01-preview",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.servicefabricmesh/secrets/{}", "2018-09-01-preview"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return self._output()
_args_schema = None
@classmethod
def METHOD_NAME(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super().METHOD_NAME(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
_args_schema.name = AAZStrArg(
options=["-n", "--name"],
help="The name of the secret.",
required=True,
id_part="name",
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.SecretGet(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
return result
class SecretGet(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceFabricMesh/secrets/{secretResourceName}",
**self.url_parameters
)
@property
def method(self):
return "GET"
@property
def error_format(self):
return "ODataV4Format"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"secretResourceName", self.ctx.args.name,
skip_quote=True,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2018-09-01-preview",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.id = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.location = AAZStrType(
flags={"required": True},
)
_schema_on_200.name = AAZStrType(
flags={"read_only": True},
)
_schema_on_200.properties = AAZObjectType(
flags={"required": True},
)
_schema_on_200.tags = AAZDictType()
_schema_on_200.type = AAZStrType(
flags={"read_only": True},
)
properties = cls._schema_on_200.properties
properties.content_type = AAZStrType(
serialized_name="contentType",
)
properties.description = AAZStrType()
properties.kind = AAZStrType(
flags={"required": True},
)
properties.provisioning_state = AAZStrType(
serialized_name="provisioningState",
flags={"read_only": True},
)
properties.status = AAZStrType()
properties.status_details = AAZStrType(
serialized_name="statusDetails",
flags={"read_only": True},
)
tags = cls._schema_on_200.tags
tags.Element = AAZStrType()
return cls._schema_on_200
class _ShowHelper:
"""Helper class for Show"""
__all__ = ["Show"] |
61 | download fr24 | # ruff: noqa: E501
from __future__ import annotations
import io
from pathlib import Path
from typing import Any, ClassVar
import requests
from tqdm.rich import tqdm
import pandas as pd
from ... import cache_expiration
from ...core.mixins import GeoDBMixin
from ...core.structure import Airport
__all__ = ["Airport", "Airports"]
class Airports(GeoDBMixin):
"""
An airport is accessible via its ICAO or IATA code. In case of doubt,
use the search method.
The representation of an airport is based on its geographical footprint.
Contours are fetched from OpenStreetMap (you need an Internet connection the
first time you call it) and put in cache.
A database of major world airports is available as:
>>> from traffic.data import airports
Airports information can be accessed with attributes:
>>> airports["EHAM"].latlon # doctest: +NUMBER
(52.3086, 4.7639)
>>> airports["EHAM"].iata
'AMS'
>>> airports["EHAM"].name
'Amsterdam Airport Schiphol'
"""
cache_dir: Path
expiration_days: None | int
src_dict: ClassVar[dict[str, tuple[str, str]]] = dict(
fr24=("airports_fr24.parquet", "download_fr24"),
open=("airports_ourairports.parquet", "download_airports"),
)
columns_options: ClassVar[dict[str, dict[str, Any]]] = dict( # type: ignore
name=dict(),
country=dict(justify="right"),
icao=dict(style="blue bold"),
iata=dict(),
latitude=dict(justify="left", max_width=10),
longitude=dict(justify="left", max_width=10),
)
def __init__(self, data: None | pd.DataFrame = None) -> None:
self._data: None | pd.DataFrame = data
self._src = "open"
def download_airports(self) -> None: # coverage: ignore
"""
Download an up to date version of the airports database from
`ourairports.com <https://ourairports.com/>`_
"""
from .. import session
f = session.get(
"https://ourairports.com/data/airports.csv", stream=True
)
total = int(f.headers["Content-Length"])
buffer = io.BytesIO()
for chunk in tqdm(
f.iter_content(1024),
            total=total // 1024 + (1 if total % 1024 > 0 else 0),  # ceil(total / 1024)
desc="airports @ourairports.com",
):
buffer.write(chunk)
buffer.seek(0)
df = pd.read_csv(buffer)
f = session.get("https://ourairports.com/data/countries.csv")
buffer = io.BytesIO(f.content)
buffer.seek(0)
countries = pd.read_csv(buffer)
self._data = df.rename(
columns={
"latitude_deg": "latitude",
"longitude_deg": "longitude",
"elevation_ft": "altitude",
"iata_code": "iata",
"ident": "icao",
}
).merge(
countries[["code", "name"]].rename(
columns=dict(code="iso_country", name="country")
)
)[
[
"name",
"iata",
"icao",
"latitude",
"longitude",
"country",
"altitude",
"type",
"municipality",
]
]
self._data.to_parquet(self.cache_dir / "airports_ourairports.parquet")
def METHOD_NAME(self) -> None: # coverage: ignore
from .. import session
c = session.get(
"https://www.flightradar24.com/_json/airports.php",
headers={"user-agent": "Mozilla/5.0"},
)
self._data = (
pd.DataFrame.from_records(c.json()["rows"])
.assign(name=lambda df: df.name.str.strip())
.rename(
columns={
"lat": "latitude",
"lon": "longitude",
"alt": "altitude",
}
)
)
self._data.to_parquet(self.cache_dir / "airports_fr24.parquet")
@property
def data(self) -> pd.DataFrame:
if self._data is not None:
return self._data
cache_file, method_name = self.src_dict[self._src]
if not (self.cache_dir / cache_file).exists():
getattr(self, method_name)()
last_modification = (self.cache_dir / cache_file).lstat().st_mtime
delta = pd.Timestamp("now") - pd.Timestamp(last_modification * 1e9)
if delta > cache_expiration:
try:
getattr(self, method_name)()
except requests.ConnectionError:
pass
self._data = pd.read_parquet(self.cache_dir / cache_file)
return self._data
def __getitem__(self, name: str) -> None | Airport:
"""
Any airport can be accessed by the bracket notation.
:param name: the IATA or ICAO code of the airport
>>> from traffic.data import airports
>>> airports["EHAM"]
Airport(icao='EHAM', iata='AMS', name='Amsterdam Airport Schiphol', country='Netherlands', latitude=52.308, longitude=4.763, altitude=-11.0)
"""
if isinstance(name, int):
p = self.data.iloc[name]
else:
x = self.data.query(
"iata == @name.upper() or icao == @name.upper()"
)
if x.shape[0] == 0:
return None
p = x.iloc[0]
return Airport(
p.altitude,
p.country,
p.iata,
p.icao,
p.latitude,
p.longitude,
p["name"],
)
def search(self, name: str) -> "Airports":
"""
:param name: refers to the IATA or ICAO code, or part of the country
        name, the city name or the full name of the airport.
>>> from traffic.data import airports
>>> airports.query('type == "large_airport"').search('Tokyo') # doctest: +SKIP
name country icao iata latitude longitude
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Narita International Airport Japan RJAA NRT 35.76 140.4
Tokyo Haneda International Airport Japan RJTT HND 35.55 139.8
"""
if "municipality" in self.data.columns:
return self.__class__(
self.data.query(
"iata == @name.upper() or "
"icao.str.contains(@name.upper()) or "
"country.str.upper().str.contains(@name.upper()) or "
"municipality.str.upper().str.contains(@name.upper()) or "
"name.str.upper().str.contains(@name.upper())"
),
)
else:
return self.__class__(
self.data.query(
"iata == @name.upper() or "
"icao.str.contains(@name.upper()) or "
"country.str.upper().str.contains(@name.upper()) or "
"name.str.upper().str.contains(@name.upper())"
),
) |
62 | is multicast | # Copyright (C) Dnspython Contributors, see LICENSE for text of ISC license
# Copyright (C) 2003-2017 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Generic Internet address helper functions."""
import socket
from typing import Any, Optional, Tuple
import dns.ipv4
import dns.ipv6
# We assume that AF_INET and AF_INET6 are always defined. We keep
# these here for the benefit of any old code (unlikely though that
# is!).
AF_INET = socket.AF_INET
AF_INET6 = socket.AF_INET6
def inet_pton(family: int, text: str) -> bytes:
"""Convert the textual form of a network address into its binary form.
*family* is an ``int``, the address family.
*text* is a ``str``, the textual address.
Raises ``NotImplementedError`` if the address family specified is not
implemented.
Returns a ``bytes``.
"""
if family == AF_INET:
return dns.ipv4.inet_aton(text)
elif family == AF_INET6:
return dns.ipv6.inet_aton(text, True)
else:
raise NotImplementedError
def inet_ntop(family: int, address: bytes) -> str:
"""Convert the binary form of a network address into its textual form.
*family* is an ``int``, the address family.
*address* is a ``bytes``, the network address in binary form.
Raises ``NotImplementedError`` if the address family specified is not
implemented.
Returns a ``str``.
"""
if family == AF_INET:
return dns.ipv4.inet_ntoa(address)
elif family == AF_INET6:
return dns.ipv6.inet_ntoa(address)
else:
raise NotImplementedError
def af_for_address(text: str) -> int:
"""Determine the address family of a textual-form network address.
*text*, a ``str``, the textual address.
Raises ``ValueError`` if the address family cannot be determined
from the input.
Returns an ``int``.
"""
try:
dns.ipv4.inet_aton(text)
return AF_INET
except Exception:
try:
dns.ipv6.inet_aton(text, True)
return AF_INET6
except Exception:
raise ValueError
def METHOD_NAME(text: str) -> bool:
"""Is the textual-form network address a multicast address?
*text*, a ``str``, the textual address.
Raises ``ValueError`` if the address family cannot be determined
from the input.
Returns a ``bool``.
"""
try:
first = dns.ipv4.inet_aton(text)[0]
return first >= 224 and first <= 239
except Exception:
try:
first = dns.ipv6.inet_aton(text, True)[0]
return first == 255
except Exception:
raise ValueError
def is_address(text: str) -> bool:
"""Is the specified string an IPv4 or IPv6 address?
*text*, a ``str``, the textual address.
Returns a ``bool``.
"""
try:
dns.ipv4.inet_aton(text)
return True
except Exception:
try:
dns.ipv6.inet_aton(text, True)
return True
except Exception:
return False
def low_level_address_tuple(
high_tuple: Tuple[str, int], af: Optional[int] = None
) -> Any:
"""Given a "high-level" address tuple, i.e.
an (address, port) return the appropriate "low-level" address tuple
suitable for use in socket calls.
If an *af* other than ``None`` is provided, it is assumed the
address in the high-level tuple is valid and has that af. If af
is ``None``, then af_for_address will be called.
"""
address, port = high_tuple
if af is None:
af = af_for_address(address)
if af == AF_INET:
return (address, port)
elif af == AF_INET6:
i = address.find("%")
if i < 0:
# no scope, shortcut!
return (address, port, 0, 0)
# try to avoid getaddrinfo()
addrpart = address[:i]
scope = address[i + 1 :]
if scope.isdigit():
return (addrpart, port, 0, int(scope))
try:
return (addrpart, port, 0, socket.if_nametoindex(scope))
except AttributeError: # pragma: no cover (we can't really test this)
ai_flags = socket.AI_NUMERICHOST
((*_, tup), *_) = socket.getaddrinfo(address, port, flags=ai_flags)
return tup
else:
raise NotImplementedError(f"unknown address family {af}")
def any_for_af(af):
"""Return the 'any' address for the specified address family."""
if af == socket.AF_INET:
return "0.0.0.0"
elif af == socket.AF_INET6:
return "::"
raise NotImplementedError(f"unknown address family {af}") |
63 | test requires grad outputs side effects | # Owner(s): ["oncall: jit"]
import torch
from torch.testing._internal.common_utils import skipIfTorchDynamo
from torch.testing._internal.jit_utils import JitTestCase
from typing import List
@skipIfTorchDynamo()
class TestAutodiffJit(JitTestCase):
def test_undefined_tensor_lists(self):
def fn(tensor_list: List[torch.Tensor], add_tensor):
cat = torch.cat(tensor_list, dim=1)
r = torch.sin(cat + add_tensor)
return r
fn_s = torch.jit.script(fn)
a = torch.rand((3, 6), requires_grad=True)
b = torch.rand((3, 10), requires_grad=True)
x = [a, b]
y = torch.rand((3, 16), requires_grad=True)
ret = fn_s(x, y)
ret.sum().backward()
ret = fn_s(x, y)
ret.sum().backward()
ret = fn_s(x, y)
s = ret.sum()
# backward_fn expects 2 inputs: (grad_output, current_grad_r)
# current_grad_r is provided because we need to add this contribution
# to grad_r when we return it.
backward_fn = s.grad_fn.next_functions[0][0]
# check behavior with defined tensor
grad_out = torch.rand((3, 16))
grad_inputs = backward_fn(grad_out, None)
# expect 3 tensors: grad_y, grad_a, grad_b
self.assertEqual(3, len(grad_inputs))
for x in grad_inputs:
self.assertTrue(isinstance(x, torch.Tensor))
# now test with undefined grad_out
grad_inputs = backward_fn(None, None)
# expect all of them to be None
self.assertEqual(3, len(grad_inputs))
for x in grad_inputs:
if x is not None:
self.assertEqual(0, torch.max(torch.abs(x)).item())
def test_requires_grad_outputs(self):
# outputs should require_grad only if eager outputs would require_grad.
def fn(a, b, c):
return a.relu() + b.relu(), c.relu()
a = torch.rand((10, 10), requires_grad=False)
b = torch.rand((10, 10), requires_grad=False)
c = torch.rand((10, 10), requires_grad=True)
fn_s = torch.jit.script(fn)
for i in range(4):
x, y = fn_s(a, b, c)
self.assertFalse(x.requires_grad)
self.assertTrue(y.requires_grad)
def test_requires_grad_outputs_profiled_twice(self):
# the value "r" is used twice, by gammaln and by entr, so it is profiled twice.
# So during autodiff graph formation the profile nodes are unmerged because
# they are aliasing. Then the DifferentiableGraph doesn't have a profile
# node on the output. The requires_grad info should then be added onto the
# output value (otherwise autodiff will make the output require_grad).
# Note: this relies on gammaln and entr not having autodiff implementations.
def fn(a, b, c):
r = a.relu().relu()
return torch.special.gammaln(r), torch.special.entr(r), c.cos().relu()
fn_s = torch.jit.script(fn)
a = torch.rand((10, 10), requires_grad=False)
b = torch.rand((10, 10), requires_grad=False)
c = torch.rand((10, 10), requires_grad=True)
for i in range(4):
x_s, y_s, z_s = fn_s(a, b, c)
x, y, z = fn(a, b, c)
self.assertEqual(x_s.requires_grad, x.requires_grad)
self.assertEqual(y_s.requires_grad, y.requires_grad)
self.assertEqual(z_s.requires_grad, z.requires_grad)
def METHOD_NAME(self):
# same as above, but also add a CallFunction in between.
@torch.jit.ignore
def python_fn(x):
return x.relu()
def fn(a, b, c):
r = a.relu().relu()
z = python_fn(r)
return torch.relu(r), torch.nn.functional.gelu(r), c.cos().relu()
fn_s = torch.jit.script(fn)
a = torch.rand((10, 10), requires_grad=False)
b = torch.rand((10, 10), requires_grad=False)
c = torch.rand((10, 10), requires_grad=True)
for i in range(4):
x_s, y_s, z_s = fn_s(a, b, c)
x, y, z = fn(a, b, c)
self.assertEqual(x_s.requires_grad, x.requires_grad)
self.assertEqual(y_s.requires_grad, y.requires_grad)
self.assertEqual(z_s.requires_grad, z.requires_grad)
def test_autodiff_requires_grad_nograd(self):
@torch.jit.ignore
def python_fn(x):
return x.relu()
def fn(a, b, c):
x = a.sin().relu()
y = python_fn(b)
with torch.no_grad():
z = x + c
return x, y, z
fn_s = torch.jit.script(fn)
a = torch.rand((10, 10), requires_grad=True)
b = torch.rand((10, 10), requires_grad=True)
c = torch.rand((10, 10), requires_grad=True)
for i in range(4):
x_s, y_s, z_s = fn_s(a, b, c)
x, y, z = fn(a, b, c)
self.assertEqual(x_s.requires_grad, x.requires_grad)
self.assertEqual(y_s.requires_grad, y.requires_grad)
self.assertEqual(z_s.requires_grad, z.requires_grad) |
64 | value to string | from enum import Enum
import django
from django.utils import six
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models.fields import BLANK_CHOICE_DASH
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
from .forms import EnumChoiceField
class CastOnAssignDescriptor(object):
"""
A property descriptor which ensures that `field.to_python()` is called on _every_ assignment to the field.
This used to be provided by the `django.db.models.subclassing.Creator` class, which in turn
was used by the deprecated-in-Django-1.10 `SubfieldBase` class, hence the reimplementation here.
"""
def __init__(self, field):
self.field = field
def __get__(self, obj, type=None):
if obj is None:
return self
return obj.__dict__[self.field.name]
def __set__(self, obj, value):
obj.__dict__[self.field.name] = self.field.to_python(value)
class EnumFieldMixin(object):
def __init__(self, enum, **options):
if isinstance(enum, six.string_types):
self.enum = import_string(enum)
else:
self.enum = enum
if "choices" not in options:
options["choices"] = [ # choices for the TypedChoiceField
(i, getattr(i, 'label', i.name))
for i in self.enum
]
super(EnumFieldMixin, self).__init__(**options)
def contribute_to_class(self, cls, name):
super(EnumFieldMixin, self).contribute_to_class(cls, name)
setattr(cls, name, CastOnAssignDescriptor(self))
def to_python(self, value):
if value is None or value == '':
return None
if isinstance(value, self.enum):
return value
for m in self.enum:
if value == m:
return m
if value == m.value or str(value) == str(m.value) or str(value) == str(m):
return m
raise ValidationError('%s is not a valid value for enum %s' % (value, self.enum), code="invalid_enum_value")
def get_prep_value(self, value):
if value is None:
return None
if isinstance(value, self.enum): # Already the correct type -- fast path
return value.value
return self.enum(value).value
def from_db_value(self, value, expression, connection, context):
return self.to_python(value)
def METHOD_NAME(self, obj):
"""
        This method is needed to support proper serialization. Although it is named value_to_string(),
        its real purpose is to convert the value to a serializable format.
        Since most enum values are strings or integers, we will NOT convert them to strings,
        so that integers can be serialized natively.
"""
if django.VERSION >= (2, 0):
value = self.value_from_object(obj)
else:
value = self._get_val_from_obj(obj)
return value.value if value else None
def get_default(self):
if self.has_default():
if self.default is None:
return None
if isinstance(self.default, Enum):
return self.default
return self.enum(self.default)
return super(EnumFieldMixin, self).get_default()
def deconstruct(self):
name, path, args, kwargs = super(EnumFieldMixin, self).deconstruct()
kwargs['enum'] = self.enum
kwargs.pop('choices', None)
if 'default' in kwargs:
if hasattr(kwargs["default"], "value"):
kwargs["default"] = kwargs["default"].value
return name, path, args, kwargs
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH):
# Force enum fields' options to use the `value` of the enumeration
# member as the `value` of SelectFields and similar.
return [
(i.value if isinstance(i, Enum) else i, display)
for (i, display)
in super(EnumFieldMixin, self).get_choices(include_blank, blank_choice)
]
def formfield(self, form_class=None, choices_form_class=None, **kwargs):
if not choices_form_class:
choices_form_class = EnumChoiceField
return super(EnumFieldMixin, self).formfield(
form_class=form_class,
choices_form_class=choices_form_class,
**kwargs
)
class EnumField(EnumFieldMixin, models.CharField):
def __init__(self, enum, **kwargs):
kwargs.setdefault("max_length", 10)
super(EnumField, self).__init__(enum, **kwargs)
self.validators = []
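# Illustrative model usage (sketch; the enum and model names are hypothetical):
#   class Fruit(Enum):
#       APPLE = "apple"
#   class Basket(models.Model):
#       fruit = EnumField(Fruit, max_length=16)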
class EnumIntegerField(EnumFieldMixin, models.IntegerField):
@cached_property
def validators(self):
# Skip IntegerField validators, since they will fail with
# TypeError: unorderable types: TheEnum() < int()
# when used database reports min_value or max_value from
# connection.ops.integer_field_range method.
next = super(models.IntegerField, self)
return next.validators
def get_prep_value(self, value):
if value is None:
return None
if isinstance(value, Enum):
return value.value
try:
return int(value)
except ValueError:
return self.to_python(value).value |
65 | get response plan output | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetResponsePlanResult',
'AwaitableGetResponsePlanResult',
'get_response_plan',
'get_response_plan_output',
]
@pulumi.output_type
class GetResponsePlanResult:
"""
A collection of values returned by getResponsePlan.
"""
def __init__(__self__, actions=None, arn=None, chat_channels=None, display_name=None, engagements=None, id=None, incident_templates=None, integrations=None, name=None, tags=None):
if actions and not isinstance(actions, list):
raise TypeError("Expected argument 'actions' to be a list")
pulumi.set(__self__, "actions", actions)
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if chat_channels and not isinstance(chat_channels, list):
raise TypeError("Expected argument 'chat_channels' to be a list")
pulumi.set(__self__, "chat_channels", chat_channels)
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
pulumi.set(__self__, "display_name", display_name)
if engagements and not isinstance(engagements, list):
raise TypeError("Expected argument 'engagements' to be a list")
pulumi.set(__self__, "engagements", engagements)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if incident_templates and not isinstance(incident_templates, list):
raise TypeError("Expected argument 'incident_templates' to be a list")
pulumi.set(__self__, "incident_templates", incident_templates)
if integrations and not isinstance(integrations, list):
raise TypeError("Expected argument 'integrations' to be a list")
pulumi.set(__self__, "integrations", integrations)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def actions(self) -> Sequence['outputs.GetResponsePlanActionResult']:
"""
(Optional) The actions that the response plan starts at the beginning of an incident.
"""
return pulumi.get(self, "actions")
@property
@pulumi.getter
def arn(self) -> str:
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="chatChannels")
def chat_channels(self) -> Sequence[str]:
"""
The Chatbot chat channel used for collaboration during an incident.
"""
return pulumi.get(self, "chat_channels")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> str:
"""
The long format of the response plan name. This field can contain spaces.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def engagements(self) -> Sequence[str]:
"""
The Amazon Resource Name (ARN) for the contacts and escalation plans that the response plan engages during an incident.
"""
return pulumi.get(self, "engagements")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="incidentTemplates")
def incident_templates(self) -> Sequence['outputs.GetResponsePlanIncidentTemplateResult']:
return pulumi.get(self, "incident_templates")
@property
@pulumi.getter
def integrations(self) -> Sequence['outputs.GetResponsePlanIntegrationResult']:
"""
Information about third-party services integrated into the response plan. The following values are supported:
"""
return pulumi.get(self, "integrations")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the PagerDuty configuration.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def tags(self) -> Mapping[str, str]:
"""
The tags applied to the response plan.
"""
return pulumi.get(self, "tags")
class AwaitableGetResponsePlanResult(GetResponsePlanResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetResponsePlanResult(
actions=self.actions,
arn=self.arn,
chat_channels=self.chat_channels,
display_name=self.display_name,
engagements=self.engagements,
id=self.id,
incident_templates=self.incident_templates,
integrations=self.integrations,
name=self.name,
tags=self.tags)
def get_response_plan(arn: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetResponsePlanResult:
"""
Use this data source to manage a response plan in AWS Systems Manager Incident Manager.
## Example Usage
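    A minimal sketch (the ARN is a placeholder):
        import pulumi_aws as aws
        example = aws.ssmincidents.get_response_plan(arn="arn:aws:ssm-incidents::111122223333:response-plan/example")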
:param str arn: The Amazon Resource Name (ARN) of the response plan.
:param Mapping[str, str] tags: The tags applied to the response plan.
"""
__args__ = dict()
__args__['arn'] = arn
__args__['tags'] = tags
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('aws:ssmincidents/getResponsePlan:getResponsePlan', __args__, opts=opts, typ=GetResponsePlanResult).value
return AwaitableGetResponsePlanResult(
actions=pulumi.get(__ret__, 'actions'),
arn=pulumi.get(__ret__, 'arn'),
chat_channels=pulumi.get(__ret__, 'chat_channels'),
display_name=pulumi.get(__ret__, 'display_name'),
engagements=pulumi.get(__ret__, 'engagements'),
id=pulumi.get(__ret__, 'id'),
incident_templates=pulumi.get(__ret__, 'incident_templates'),
integrations=pulumi.get(__ret__, 'integrations'),
name=pulumi.get(__ret__, 'name'),
tags=pulumi.get(__ret__, 'tags'))
@_utilities.lift_output_func(get_response_plan)
def METHOD_NAME(arn: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Optional[Mapping[str, str]]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetResponsePlanResult]:
"""
Use this data source to manage a response plan in AWS Systems Manager Incident Manager.
## Example Usage
:param str arn: The Amazon Resource Name (ARN) of the response plan.
:param Mapping[str, str] tags: The tags applied to the response plan.
"""
... |
66 | id | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetServerKeyResult',
'AwaitableGetServerKeyResult',
'get_server_key',
'get_server_key_output',
]
@pulumi.output_type
class GetServerKeyResult:
"""
A server key.
"""
def __init__(__self__, creation_date=None, METHOD_NAME=None, kind=None, location=None, name=None, server_key_type=None, subregion=None, thumbprint=None, type=None, uri=None):
if creation_date and not isinstance(creation_date, str):
raise TypeError("Expected argument 'creation_date' to be a str")
pulumi.set(__self__, "creation_date", creation_date)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", METHOD_NAME)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if server_key_type and not isinstance(server_key_type, str):
raise TypeError("Expected argument 'server_key_type' to be a str")
pulumi.set(__self__, "server_key_type", server_key_type)
if subregion and not isinstance(subregion, str):
raise TypeError("Expected argument 'subregion' to be a str")
pulumi.set(__self__, "subregion", subregion)
if thumbprint and not isinstance(thumbprint, str):
raise TypeError("Expected argument 'thumbprint' to be a str")
pulumi.set(__self__, "thumbprint", thumbprint)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if uri and not isinstance(uri, str):
raise TypeError("Expected argument 'uri' to be a str")
pulumi.set(__self__, "uri", uri)
@property
@pulumi.getter(name="creationDate")
def creation_date(self) -> Optional[str]:
"""
The server key creation date.
"""
return pulumi.get(self, "creation_date")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Kind of encryption protector. This is metadata used for the Azure portal experience.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def location(self) -> str:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="serverKeyType")
def server_key_type(self) -> str:
"""
The server key type like 'ServiceManaged', 'AzureKeyVault'.
"""
return pulumi.get(self, "server_key_type")
@property
@pulumi.getter
def subregion(self) -> str:
"""
Subregion of the server key.
"""
return pulumi.get(self, "subregion")
@property
@pulumi.getter
def thumbprint(self) -> Optional[str]:
"""
Thumbprint of the server key.
"""
return pulumi.get(self, "thumbprint")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def uri(self) -> Optional[str]:
"""
The URI of the server key.
"""
return pulumi.get(self, "uri")
class AwaitableGetServerKeyResult(GetServerKeyResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetServerKeyResult(
creation_date=self.creation_date,
METHOD_NAME=self.METHOD_NAME,
kind=self.kind,
location=self.location,
name=self.name,
server_key_type=self.server_key_type,
subregion=self.subregion,
thumbprint=self.thumbprint,
type=self.type,
uri=self.uri)
def get_server_key(key_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
server_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetServerKeyResult:
"""
Gets a server key.
:param str key_name: The name of the server key to be retrieved.
:param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param str server_name: The name of the server.
"""
__args__ = dict()
__args__['keyName'] = key_name
__args__['resourceGroupName'] = resource_group_name
__args__['serverName'] = server_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:sql/v20150501preview:getServerKey', __args__, opts=opts, typ=GetServerKeyResult).value
return AwaitableGetServerKeyResult(
creation_date=pulumi.get(__ret__, 'creation_date'),
METHOD_NAME=pulumi.get(__ret__, 'id'),
kind=pulumi.get(__ret__, 'kind'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
server_key_type=pulumi.get(__ret__, 'server_key_type'),
subregion=pulumi.get(__ret__, 'subregion'),
thumbprint=pulumi.get(__ret__, 'thumbprint'),
type=pulumi.get(__ret__, 'type'),
uri=pulumi.get(__ret__, 'uri'))
@_utilities.lift_output_func(get_server_key)
def get_server_key_output(key_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
server_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetServerKeyResult]:
"""
Gets a server key.
:param str key_name: The name of the server key to be retrieved.
:param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param str server_name: The name of the server.
"""
... |
67 | test record subtraction with none adds | # Copyright (c) 2022. The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details
import itertools
import pytest
from ldap_sync.concepts import types
from ldap_sync.concepts.action import AddAction, DeleteAction, IdleAction, ModifyAction
from ldap_sync.concepts.record import UserRecord, escape_and_normalize_attrs, GroupRecord, Record
from ldap_sync.record_diff import diff_records, diff_attributes, iter_zip_dicts
from ldap_sync.concepts.types import DN
@pytest.fixture(scope="module")
def dn() -> DN:
return DN("uid=foo")
@pytest.fixture(scope="module")
def record(dn) -> UserRecord:
return UserRecord(dn=dn, attrs={'mail': 'shizzle'})
@pytest.fixture(scope="module")
def group_record(dn) -> GroupRecord:
return GroupRecord(dn=dn, attrs={})
def test_none_diff_raises():
with pytest.raises(ValueError, match="cannot diff.*nonexistent"):
diff_records(None, None)
def test_diff_other_dn_raises(record, dn):
with pytest.raises(TypeError, match="diff.*different dn"):
diff_records(record, UserRecord(dn=types.DN(f"_{dn}"), attrs={}))
def test_heterogeneous_diff_raises(record, group_record):
with pytest.raises(TypeError, match="Cannot diff.*Record"):
diff_records(record, group_record) # type: ignore
def METHOD_NAME(record):
difference = diff_records(None, record)
assert isinstance(difference, AddAction)
assert difference.record_dn == record.dn
assert difference.nonempty_attrs.items() <= record.attrs.items()
assert all((
not val
for key, val in record.attrs.items()
if key not in difference.nonempty_attrs
))
def test_none_subtracted_by_record_deletes(record):
difference = diff_records(record, None)
assert isinstance(difference, DeleteAction)
assert difference.record_dn == record.dn
def test_different_dn_raises_typeerror(record):
with pytest.raises(TypeError, match="different dn"):
_ = diff_records(UserRecord(dn=DN("notatest"), attrs={}), record)
def test_same_record_subtraction_idles(record):
difference = diff_records(record, record)
assert isinstance(difference, IdleAction)
@pytest.mark.parametrize("record_class", (UserRecord, GroupRecord))
@pytest.mark.parametrize("attrs_one, attrs_other, expected_diff", (
({}, {"cn": "foo"}, {"cn": "foo"}),
({"cn": "notfoo"}, {"cn": "foo"}, {"cn": "foo"}),
))
def test_modification(dn, record_class: type[Record], attrs_one, attrs_other, expected_diff):
one = record_class(dn=dn, attrs=attrs_one)
other = record_class(dn=dn, attrs=attrs_other)
action = diff_records(one, other)
assert isinstance(action, ModifyAction)
assert one.attrs | action.modifications == other.attrs
class TestAttributeDiff:
@pytest.mark.parametrize("attrs_current, attrs_desired, modifications", [
({"gecos": "bar"},
{"gecos": None},
{"gecos": []},),
({"foo": "bar"},
{"foo": "bar", "mail": "admin@sci.hub"},
{"mail": ["admin@sci.hub"]},),
({"gecos": "bar", "mail": "admin@sci.hub"},
{"gecos": "bar", "mail": ""},
{"mail": []},),
({"gecos": "baz", "mail": "admin@sci.hub"},
{"gecos": "bar", "mail": "admin@sci.hub"},
{"gecos": ["bar"]},),
])
def test_modify_action(
self,
dn,
attrs_current: types.NormalizedAttributes,
attrs_desired: types.NormalizedAttributes,
modifications: types.NormalizedAttributes,
):
assert (
diff_attributes(
desired_attrs=escape_and_normalize_attrs(attrs_desired),
current_attrs=escape_and_normalize_attrs(attrs_current),
)
== modifications
)
@pytest.mark.parametrize("d1, d2, expected", [
({}, {},
{}),
({"a": 1}, {"b": 2},
{"a": (1, None), "b": (None, 2)}),
({"a": 1, "b": 2}, {"b": 3, "c": 1},
{"a": (1, None), "b": (2, 3), "c": (None, 1)})
])
def test_dict_zipping(d1, d2, expected):
assert dict(iter_zip_dicts(d1, d2)) == expected
@pytest.mark.parametrize("d1, d2", itertools.combinations([
{}, {"a": 1}, {"a": 2}, {"a": 1, "b": 2}, {"a": 10, "c": 2},
], 2))
def test_dict_zipping_and_projection_is_merging(d1: dict[str, int], d2: dict[str, int]):
assert {k: v2 or v1 for k, (v1, v2) in iter_zip_dicts(d1, d2)} == {**d1, **d2} |
68 | build tree | import unittest
from Cython.Compiler.Visitor import PrintTree
from Cython.TestUtils import TransformTest
from Cython.Compiler.TreePath import find_first, find_all
from Cython.Compiler import Nodes, ExprNodes
class TestTreePath(TransformTest):
_tree = None
def METHOD_NAME(self):
if self._tree is None:
self._tree = self.run_pipeline([], u"""
def decorator(fun): # DefNode
return fun # ReturnStatNode, NameNode
@decorator # NameNode
def decorated(): # DefNode
pass
""")
return self._tree
def test_node_path(self):
t = self.METHOD_NAME()
self.assertEqual(2, len(find_all(t, "//DefNode")))
self.assertEqual(2, len(find_all(t, "//NameNode")))
self.assertEqual(1, len(find_all(t, "//ReturnStatNode")))
self.assertEqual(1, len(find_all(t, "//DefNode//ReturnStatNode")))
def test_node_path_star(self):
t = self.METHOD_NAME()
self.assertEqual(10, len(find_all(t, "//*")))
self.assertEqual(8, len(find_all(t, "//DefNode//*")))
self.assertEqual(0, len(find_all(t, "//NameNode//*")))
def test_node_path_attribute(self):
t = self.METHOD_NAME()
self.assertEqual(2, len(find_all(t, "//NameNode/@name")))
self.assertEqual(['fun', 'decorator'], find_all(t, "//NameNode/@name"))
def test_node_path_attribute_dotted(self):
t = self.METHOD_NAME()
self.assertEqual(1, len(find_all(t, "//ReturnStatNode/@value.name")))
self.assertEqual(['fun'], find_all(t, "//ReturnStatNode/@value.name"))
def test_node_path_child(self):
t = self.METHOD_NAME()
self.assertEqual(1, len(find_all(t, "//DefNode/ReturnStatNode/NameNode")))
self.assertEqual(1, len(find_all(t, "//ReturnStatNode/NameNode")))
def test_node_path_node_predicate(self):
t = self.METHOD_NAME()
self.assertEqual(0, len(find_all(t, "//DefNode[.//ForInStatNode]")))
self.assertEqual(2, len(find_all(t, "//DefNode[.//NameNode]")))
self.assertEqual(1, len(find_all(t, "//ReturnStatNode[./NameNode]")))
self.assertEqual(Nodes.ReturnStatNode,
type(find_first(t, "//ReturnStatNode[./NameNode]")))
def test_node_path_node_predicate_step(self):
t = self.METHOD_NAME()
self.assertEqual(2, len(find_all(t, "//DefNode[.//NameNode]")))
self.assertEqual(8, len(find_all(t, "//DefNode[.//NameNode]//*")))
self.assertEqual(1, len(find_all(t, "//DefNode[.//NameNode]//ReturnStatNode")))
self.assertEqual(Nodes.ReturnStatNode,
type(find_first(t, "//DefNode[.//NameNode]//ReturnStatNode")))
def test_node_path_attribute_exists(self):
t = self.METHOD_NAME()
self.assertEqual(2, len(find_all(t, "//NameNode[@name]")))
self.assertEqual(ExprNodes.NameNode,
type(find_first(t, "//NameNode[@name]")))
def test_node_path_attribute_exists_not(self):
t = self.METHOD_NAME()
self.assertEqual(0, len(find_all(t, "//NameNode[not(@name)]")))
self.assertEqual(2, len(find_all(t, "//NameNode[not(@honking)]")))
def test_node_path_and(self):
t = self.METHOD_NAME()
self.assertEqual(1, len(find_all(t, "//DefNode[.//ReturnStatNode and .//NameNode]")))
self.assertEqual(0, len(find_all(t, "//NameNode[@honking and @name]")))
self.assertEqual(0, len(find_all(t, "//NameNode[@name and @honking]")))
self.assertEqual(2, len(find_all(t, "//DefNode[.//NameNode[@name] and @name]")))
def test_node_path_attribute_string_predicate(self):
t = self.METHOD_NAME()
self.assertEqual(1, len(find_all(t, "//NameNode[@name = 'decorator']")))
def test_node_path_recursive_predicate(self):
t = self.METHOD_NAME()
self.assertEqual(2, len(find_all(t, "//DefNode[.//NameNode[@name]]")))
self.assertEqual(1, len(find_all(t, "//DefNode[.//NameNode[@name = 'decorator']]")))
self.assertEqual(1, len(find_all(t, "//DefNode[.//ReturnStatNode[./NameNode[@name = 'fun']]/NameNode]")))
if __name__ == '__main__':
unittest.main() |
69 | created date | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetVirtualGatewayResult',
'AwaitableGetVirtualGatewayResult',
'get_virtual_gateway',
'get_virtual_gateway_output',
]
@pulumi.output_type
class GetVirtualGatewayResult:
"""
A collection of values returned by getVirtualGateway.
"""
def __init__(__self__, arn=None, METHOD_NAME=None, id=None, last_updated_date=None, mesh_name=None, mesh_owner=None, name=None, resource_owner=None, specs=None, tags=None):
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'created_date' to be a str")
pulumi.set(__self__, "created_date", METHOD_NAME)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if last_updated_date and not isinstance(last_updated_date, str):
raise TypeError("Expected argument 'last_updated_date' to be a str")
pulumi.set(__self__, "last_updated_date", last_updated_date)
if mesh_name and not isinstance(mesh_name, str):
raise TypeError("Expected argument 'mesh_name' to be a str")
pulumi.set(__self__, "mesh_name", mesh_name)
if mesh_owner and not isinstance(mesh_owner, str):
raise TypeError("Expected argument 'mesh_owner' to be a str")
pulumi.set(__self__, "mesh_owner", mesh_owner)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if resource_owner and not isinstance(resource_owner, str):
raise TypeError("Expected argument 'resource_owner' to be a str")
pulumi.set(__self__, "resource_owner", resource_owner)
if specs and not isinstance(specs, list):
raise TypeError("Expected argument 'specs' to be a list")
pulumi.set(__self__, "specs", specs)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def arn(self) -> str:
"""
ARN of the virtual gateway.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="createdDate")
def METHOD_NAME(self) -> str:
"""
Creation date of the virtual gateway.
"""
return pulumi.get(self, "created_date")
@property
@pulumi.getter
def id(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="lastUpdatedDate")
def last_updated_date(self) -> str:
"""
Last update date of the virtual gateway.
"""
return pulumi.get(self, "last_updated_date")
@property
@pulumi.getter(name="meshName")
def mesh_name(self) -> str:
return pulumi.get(self, "mesh_name")
@property
@pulumi.getter(name="meshOwner")
def mesh_owner(self) -> str:
return pulumi.get(self, "mesh_owner")
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="resourceOwner")
def resource_owner(self) -> str:
"""
Resource owner's AWS account ID.
"""
return pulumi.get(self, "resource_owner")
@property
@pulumi.getter
def specs(self) -> Sequence['outputs.GetVirtualGatewaySpecResult']:
"""
Virtual gateway specification. See the `appmesh.VirtualGateway` resource for details.
"""
return pulumi.get(self, "specs")
@property
@pulumi.getter
def tags(self) -> Mapping[str, str]:
"""
Map of tags.
"""
return pulumi.get(self, "tags")
class AwaitableGetVirtualGatewayResult(GetVirtualGatewayResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVirtualGatewayResult(
arn=self.arn,
METHOD_NAME=self.METHOD_NAME,
id=self.id,
last_updated_date=self.last_updated_date,
mesh_name=self.mesh_name,
mesh_owner=self.mesh_owner,
name=self.name,
resource_owner=self.resource_owner,
specs=self.specs,
tags=self.tags)
def get_virtual_gateway(mesh_name: Optional[str] = None,
name: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualGatewayResult:
"""
Data source for managing an AWS App Mesh Virtual Gateway.
## Example Usage
:param str mesh_name: Name of the service mesh in which the virtual gateway exists.
:param str name: Name of the virtual gateway.
:param Mapping[str, str] tags: Map of tags.
"""
__args__ = dict()
__args__['meshName'] = mesh_name
__args__['name'] = name
__args__['tags'] = tags
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('aws:appmesh/getVirtualGateway:getVirtualGateway', __args__, opts=opts, typ=GetVirtualGatewayResult).value
return AwaitableGetVirtualGatewayResult(
arn=pulumi.get(__ret__, 'arn'),
METHOD_NAME=pulumi.get(__ret__, 'created_date'),
id=pulumi.get(__ret__, 'id'),
last_updated_date=pulumi.get(__ret__, 'last_updated_date'),
mesh_name=pulumi.get(__ret__, 'mesh_name'),
mesh_owner=pulumi.get(__ret__, 'mesh_owner'),
name=pulumi.get(__ret__, 'name'),
resource_owner=pulumi.get(__ret__, 'resource_owner'),
specs=pulumi.get(__ret__, 'specs'),
tags=pulumi.get(__ret__, 'tags'))
@_utilities.lift_output_func(get_virtual_gateway)
def get_virtual_gateway_output(mesh_name: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Optional[Mapping[str, str]]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetVirtualGatewayResult]:
"""
Data source for managing an AWS App Mesh Virtual Gateway.
## Example Usage
:param str mesh_name: Name of the service mesh in which the virtual gateway exists.
:param str name: Name of the virtual gateway.
:param Mapping[str, str] tags: Map of tags.
"""
... |
70 | gitlab project name to path | import pickle
import hashlib
import re
import markdown
from markdown.extensions import Extension
from pgpdump.packet import SignaturePacket
from django.core.cache import cache
from django.db import connections, router
from django.http import HttpResponse
from django.utils.timezone import now
from django.template.defaultfilters import slugify
def cache_function_key(func, args, kwargs):
raw = [func.__name__, func.__module__, args, kwargs]
pickled = pickle.dumps(raw, protocol=pickle.HIGHEST_PROTOCOL)
key = hashlib.md5(pickled).hexdigest()
return 'cache_function.' + func.__name__ + '.' + key
def cache_function(length):
"""
A variant of the snippet posted by Jeff Wheeler at
http://www.djangosnippets.org/snippets/109/
Caches a function, using the function and its arguments as the key, and the
return value as the value saved. It passes all arguments on to the
function, as it should.
The decorator itself takes a length argument, which is the number of
seconds the cache will keep the result around.
"""
def decorator(func):
def inner_func(*args, **kwargs):
key = cache_function_key(func, args, kwargs)
value = cache.get(key)
if value is not None:
return value
else:
result = func(*args, **kwargs)
cache.set(key, result, length)
return result
return inner_func
return decorator
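# A minimal usage sketch (the decorated function and its model query are
# hypothetical, not part of this module): results are cached for `length`
# seconds under a key built by cache_function_key() from the function and its
# arguments, so repeated calls with the same arguments hit the cache.
#
#     @cache_function(300)
#     def package_count(repo_name):
#         return Package.objects.filter(repo__name=repo_name).count()
#
#     package_count('core')   # computes and stores the result
#     package_count('core')   # within 300s, served from the cache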
def clear_cache_function(func, args, kwargs):
key = cache_function_key(func, args, kwargs)
cache.delete(key)
def empty_response():
empty = HttpResponse('')
# designating response as 'streaming' forces ConditionalGetMiddleware to
# not add a 'Content-Length: 0' header
empty.streaming = True
return empty
# utility to make a pair of django choices
make_choice = lambda l: [(str(m), str(m)) for m in l] # noqa E741
def set_created_field(sender, **kwargs):
'''This will set the 'created' field on any object to the current UTC time
if it is unset.
Additionally, this will set the 'last_modified' field on any object to the
current UTC time on any save of the object.
For use as a pre_save signal handler.'''
obj = kwargs['instance']
time = now()
if hasattr(obj, 'created') and not obj.created:
obj.created = time
if hasattr(obj, 'last_modified'):
obj.last_modified = time
def find_unique_slug(model, title):
'''Attempt to find a unique slug for this model with given title.'''
existing = set(model.objects.values_list(
'slug', flat=True).order_by().distinct())
suffixed = slug = slugify(title)
suffix = 0
while suffixed in existing:
suffix += 1
suffixed = "%s-%d" % (slug, suffix)
return suffixed
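# For example (model and slugs hypothetical): if News already contains slugs
# 'release' and 'release-1', find_unique_slug(News, "Release") tries
# 'release', then 'release-1', and returns 'release-2'.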
def database_vendor(model, mode='read'):
if mode == 'read':
database = router.db_for_read(model)
elif mode == 'write':
database = router.db_for_write(model)
else:
raise Exception('Invalid database mode specified')
return connections[database].vendor
class EscapeHtml(Extension):
def extendMarkdown(self, md):
md.preprocessors.deregister('html_block')
md.inlinePatterns.deregister('html')
def parse_markdown(text, allow_html=False):
if allow_html:
return markdown.markdown(text)
ext = [EscapeHtml()]
return markdown.markdown(text, extensions=ext)
def groupby_preserve_order(iterable, keyfunc):
'''Take an iterable and regroup using keyfunc to determine whether items
belong to the same group. The order of the iterable is preserved and
similar keys do not have to be consecutive. This means the earliest
occurrence of a given key will determine the order of the lists in the
returned list.'''
seen_keys = {}
result = []
for item in iterable:
key = keyfunc(item)
group = seen_keys.get(key, None)
if group is None:
group = []
seen_keys[key] = group
result.append(group)
group.append(item)
return result
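# A small worked example (values illustrative): grouping by first letter,
# groupby_preserve_order(['apple', 'banana', 'avocado'], lambda s: s[0])
# returns [['apple', 'avocado'], ['banana']] -- unlike itertools.groupby,
# non-consecutive items with the same key land in one group, and group order
# follows the first occurrence of each key.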
def METHOD_NAME(name: str) -> str:
    '''Convert a Gitlab project name to the variant that Gitlab encodes in
    its URL / API; for example, mysql++ becomes mysqlplusplus.'''
name = re.sub(r'([a-zA-Z0-9]+)\+([a-zA-Z]+)', r'\1-\2', name)
name = re.sub(r'\+', r'plus', name)
name = re.sub(r'[^a-zA-Z0-9_\-\.]', r'-', name)
name = re.sub(r'[_\-]{2,}', r'-', name)
name = re.sub(r'^tree$', r'unix-tree', name)
return name
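# Worked examples traced through the substitutions above (inputs illustrative):
#   'mysql++'   -> 'mysqlplusplus'  (a '+' not followed by a letter becomes 'plus')
#   'gtk+extra' -> 'gtk-extra'      (a '+' joining two alphanumeric runs becomes '-')
#   'tree'      -> 'unix-tree'      (special-cased by the final substitution)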
class PackageStandin(object):
'''Resembles a Package object, and has a few of the same fields, but is
really a link to a pkgbase that has no package with matching pkgname.'''
def __init__(self, package):
self.package = package
self.pkgname = package.pkgbase
def __getattr__(self, name):
return getattr(self.package, name)
def get_absolute_url(self):
return f'/packages/{self.repo.name.lower()}/{self.arch.name}/{self.pkgname}/'
class DependStandin(object):
'''Resembles a Depend object, and has a few of the same fields, but is
really a link to a base package rather than a single package.'''
def __init__(self, depends):
self._depends = depends
first = depends[0]
self.name = first.name
self.version = first.version
self.comparison = first.comparison
self.description = first.description
self.deptype = first.deptype
self.pkg = first.pkg.base_package() or PackageStandin(first.pkg)
class SignatureWrapper(SignaturePacket):
'Decode key_id from raw SignaturePacket'
def __init__(self, packet):
for field in ("sig_version", "creation_time", "expiration_time"):
setattr(self, field, getattr(packet, field))
self.key_id = packet.key_id.decode() if packet.key_id else None
# vim: set ts=4 sw=4 et: |
71 | shutdown | """A client for in-process kernels."""
# -----------------------------------------------------------------------------
# Copyright (C) 2012 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE, distributed as part of this software.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import asyncio
from jupyter_client.client import KernelClient
from jupyter_client.clientabc import KernelClientABC
from jupyter_core.utils import run_sync
# IPython imports
from traitlets import Instance, Type, default
# Local imports
from .channels import InProcessChannel, InProcessHBChannel
# -----------------------------------------------------------------------------
# Main kernel Client class
# -----------------------------------------------------------------------------
class InProcessKernelClient(KernelClient):
"""A client for an in-process kernel.
This class implements the interface of
`jupyter_client.clientabc.KernelClientABC` and allows
(asynchronous) frontends to be used seamlessly with an in-process kernel.
See `jupyter_client.client.KernelClient` for docstrings.
"""
# The classes to use for the various channels.
shell_channel_class = Type(InProcessChannel)
iopub_channel_class = Type(InProcessChannel)
stdin_channel_class = Type(InProcessChannel)
control_channel_class = Type(InProcessChannel)
hb_channel_class = Type(InProcessHBChannel)
kernel = Instance("ipykernel.inprocess.ipkernel.InProcessKernel", allow_none=True)
# --------------------------------------------------------------------------
# Channel management methods
# --------------------------------------------------------------------------
@default("blocking_class")
def _default_blocking_class(self):
from .blocking import BlockingInProcessKernelClient
return BlockingInProcessKernelClient
def get_connection_info(self):
"""Get the connection info for the client."""
d = super().get_connection_info()
d["kernel"] = self.kernel
return d
def start_channels(self, *args, **kwargs):
"""Start the channels on the client."""
super().start_channels()
self.kernel.frontends.append(self)
@property
def shell_channel(self):
if self._shell_channel is None:
self._shell_channel = self.shell_channel_class(self)
return self._shell_channel
@property
def iopub_channel(self):
if self._iopub_channel is None:
self._iopub_channel = self.iopub_channel_class(self)
return self._iopub_channel
@property
def stdin_channel(self):
if self._stdin_channel is None:
self._stdin_channel = self.stdin_channel_class(self)
return self._stdin_channel
@property
def control_channel(self):
if self._control_channel is None:
self._control_channel = self.control_channel_class(self)
return self._control_channel
@property
def hb_channel(self):
if self._hb_channel is None:
self._hb_channel = self.hb_channel_class(self)
return self._hb_channel
# Methods for sending specific messages
# -------------------------------------
def execute(
self, code, silent=False, store_history=True, user_expressions=None, allow_stdin=None
):
"""Execute code on the client."""
if allow_stdin is None:
allow_stdin = self.allow_stdin
content = dict(
code=code,
silent=silent,
store_history=store_history,
user_expressions=user_expressions or {},
allow_stdin=allow_stdin,
)
msg = self.session.msg("execute_request", content)
self._dispatch_to_kernel(msg)
return msg["header"]["msg_id"]
def complete(self, code, cursor_pos=None):
"""Get code completion."""
if cursor_pos is None:
cursor_pos = len(code)
content = dict(code=code, cursor_pos=cursor_pos)
msg = self.session.msg("complete_request", content)
self._dispatch_to_kernel(msg)
return msg["header"]["msg_id"]
def inspect(self, code, cursor_pos=None, detail_level=0):
"""Get code inspection."""
if cursor_pos is None:
cursor_pos = len(code)
content = dict(
code=code,
cursor_pos=cursor_pos,
detail_level=detail_level,
)
msg = self.session.msg("inspect_request", content)
self._dispatch_to_kernel(msg)
return msg["header"]["msg_id"]
def history(self, raw=True, output=False, hist_access_type="range", **kwds):
"""Get code history."""
content = dict(raw=raw, output=output, hist_access_type=hist_access_type, **kwds)
msg = self.session.msg("history_request", content)
self._dispatch_to_kernel(msg)
return msg["header"]["msg_id"]
def METHOD_NAME(self, restart=False):
"""Handle shutdown."""
# FIXME: What to do here?
msg = "Cannot shutdown in-process kernel"
raise NotImplementedError(msg)
def kernel_info(self):
"""Request kernel info."""
msg = self.session.msg("kernel_info_request")
self._dispatch_to_kernel(msg)
return msg["header"]["msg_id"]
def comm_info(self, target_name=None):
"""Request a dictionary of valid comms and their targets."""
content = {} if target_name is None else dict(target_name=target_name)
msg = self.session.msg("comm_info_request", content)
self._dispatch_to_kernel(msg)
return msg["header"]["msg_id"]
def input(self, string):
"""Handle kernel input."""
if self.kernel is None:
msg = "Cannot send input reply. No kernel exists."
raise RuntimeError(msg)
self.kernel.raw_input_str = string
def is_complete(self, code):
"""Handle an is_complete request."""
msg = self.session.msg("is_complete_request", {"code": code})
self._dispatch_to_kernel(msg)
return msg["header"]["msg_id"]
def _dispatch_to_kernel(self, msg):
"""Send a message to the kernel and handle a reply."""
kernel = self.kernel
if kernel is None:
msg = "Cannot send request. No kernel exists."
raise RuntimeError(msg)
stream = kernel.shell_stream
self.session.send(stream, msg)
msg_parts = stream.recv_multipart()
if run_sync is not None:
dispatch_shell = run_sync(kernel.dispatch_shell)
dispatch_shell(msg_parts)
else:
loop = asyncio.get_event_loop()
loop.run_until_complete(kernel.dispatch_shell(msg_parts))
idents, reply_msg = self.session.recv(stream, copy=False)
self.shell_channel.call_handlers_later(reply_msg)
def get_shell_msg(self, block=True, timeout=None):
"""Get a shell message."""
return self.shell_channel.get_msg(block, timeout)
def get_iopub_msg(self, block=True, timeout=None):
"""Get an iopub message."""
return self.iopub_channel.get_msg(block, timeout)
def get_stdin_msg(self, block=True, timeout=None):
"""Get a stdin message."""
return self.stdin_channel.get_msg(block, timeout)
def get_control_msg(self, block=True, timeout=None):
"""Get a control message."""
return self.control_channel.get_msg(block, timeout)
# -----------------------------------------------------------------------------
# ABC Registration
# -----------------------------------------------------------------------------
KernelClientABC.register(InProcessKernelClient) |
72 | get ff | r"""Functions for $K\to \pi\ell\nu$ decays."""
from math import sqrt, log
import flavio
from flavio.classes import Observable, Prediction
def METHOD_NAME(q2, par, K):
ff_name = 'K->pi form factor'
ff_K0 = flavio.classes.AuxiliaryQuantity[ff_name].prediction(par_dict=par, wc_obj=None, q2=q2)
if K == 'KL' or K == 'KS':
return ff_K0
elif K == 'K+':
# isospin breaking correction for K+->pi0lnu: multiply all FFs by 1+delta
return {k: (par['K->pi delta_K+pi0'] + 1)*v for k,v in ff_K0.items()}
def get_angularcoeff(q2, wc_obj, par, K, P, lep):
Jlist = [_get_angularcoeff(q2, wc_obj, par, K, P, lep, nu)
for nu in ['e', 'mu', 'tau']]
J = {}
J['a'] = sum([JJ['a'] for JJ in Jlist])
J['b'] = sum([JJ['b'] for JJ in Jlist])
J['c'] = sum([JJ['c'] for JJ in Jlist])
return J
def _get_angularcoeff(q2, wc_obj, par, K, P, lep, nu):
GF = par['GF']
ml = par['m_'+lep]
mK = par['m_'+K]
mP = par['m_'+P]
Vus = flavio.physics.ckm.get_ckm(par)[0,1]
# renormalization scale is m_rho
scale = par['m_rho0']
ms = flavio.physics.running.running.get_ms(par, scale)
wc = flavio.physics.bdecays.wilsoncoefficients.get_wceff_fccc(wc_obj, par, 'su', lep, nu, ms, scale, nf=3)
N = 4*GF/sqrt(2)*Vus
ff = METHOD_NAME(q2, par, K)
h = flavio.physics.bdecays.angular.helicity_amps_p(q2, mK, mP, ms, 0, ml, 0, ff, wc, N)
J = flavio.physics.bdecays.angular.angularcoeffs_general_p(h, q2, mK, mP, ms, 0, ml, 0)
return J
def dGdq2(J):
return 2 * (J['a'] + J['c']/3.)
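# Sketch of where the 2*(a + c/3) factor comes from, assuming the angular
# coefficients parametrize dGamma/(dq2 dcos(theta)) = a + b*cos(theta) + c*cos(theta)**2:
# integrating cos(theta) over [-1, 1] gives 2a + 0 + 2c/3; the b term drops out.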
def dBRdq2(q2, wc_obj, par, K, P, lep):
ml = par['m_'+lep]
mK = par['m_'+K]
mP = par['m_'+P]
if q2 < ml**2 or q2 > (mK-mP)**2:
return 0
tauK = par['tau_'+K]
J = get_angularcoeff(q2, wc_obj, par, K, P, lep)
if P == 'pi0':
# factor of 1/2 for neutral pi due to pi = (uubar-ddbar)/sqrt(2)
return tauK * dGdq2(J) / 2.
if K == 'K+':
deltaEM = par['K+' + lep + '3 delta_EM'] # e.g. 'K+e3 delta_EM'
elif K == 'KL' or K == 'KS':
        deltaEM = par['K0' + lep + '3 delta_EM'] # e.g. 'K0e3 delta_EM'
return tauK * dGdq2(J) * (1 + deltaEM)**2
def BR_binned(q2min, q2max, wc_obj, par, K, P, lep):
def integrand(q2):
return dBRdq2(q2, wc_obj, par, K, P, lep)
return flavio.math.integrate.nintegrate(integrand, q2min, q2max)
def BR_tot(wc_obj, par, K, P, lep):
mK = par['m_'+K]
mP = par['m_'+P]
ml = par['m_'+lep]
q2max = (mK-mP)**2
q2min = ml**2
return BR_binned(q2min, q2max, wc_obj, par, K, P, lep)
def BR_tot_function(K, P, lep):
return lambda wc_obj, par: BR_tot(wc_obj, par, K, P, lep)
def logC(wc_obj, par, lep):
mK = par['m_KL']
mP = par['m_pi+']
ml = par['m_' + lep]
q2 = mK**2 - mP**2
ff = METHOD_NAME(q2, par, 'KL')
ff0 = METHOD_NAME(0, par, 'KL')
scale = par['m_rho0']
ms = flavio.physics.running.running.get_ms(par, scale)
wc = flavio.physics.bdecays.wilsoncoefficients.get_wceff_fccc(wc_obj, par, 'su', lep, lep, ms, scale, nf=3)
A = ml / q2 * (wc['a'] + wc['ap']).real
mu = 0 # mu/ms neglected
C = ff['f0'] / ff0['f0'] * ((wc['p'] + wc['pp']).real / (ms + mu) + A) / A
return log(C)
def RT(wc_obj, par, lep):
mK = par['m_KL']
mP = par['m_pi+']
scale = par['m_rho0']
ms = flavio.physics.running.running.get_ms(par, scale)
wc = flavio.physics.bdecays.wilsoncoefficients.get_wceff_fccc(wc_obj, par, 'su', lep, lep, ms, scale, nf=3)
ff = METHOD_NAME(0, par, 'KL')
BT = ff['fT'] * 2 * mK / (mK + mP) # convert between tensor FF conventions
return -2 * BT / ff['f+'] * wc['tp'].real
def logC_function(lep):
def _(wc_obj, par):
return logC(wc_obj, par, lep)
return _
def RT_function(lep):
def _(wc_obj, par):
return RT(wc_obj, par, lep)
return _
# Observable and Prediction instances
_tex = {'e': 'e', 'mu': r'\mu', 'l': r'\ell'}
_tex_br = {'dBR/dq2': r'\frac{d\text{BR}}{dq^2}', 'BR': r'\text{BR}', '<BR>': r'\langle\text{BR}\rangle'}
_args = {'dBR/dq2': ['q2'], 'BR': None, '<BR>': ['q2min', 'q2max']}
_hadr = {
'KL->pi': {'tex': r"K_L\to \pi^+", 'K': 'KL', 'P': 'pi+', },
'KS->pi': {'tex': r"K_S\to \pi^+", 'K': 'KS', 'P': 'pi+', },
'K+->pi': {'tex': r"K^+\to \pi^0", 'K': 'K+', 'P': 'pi0', },
}
_hadr_lnC = {
'K->pi': {'tex': r"K\to \pi", 'K': 'KL', 'P': 'pi+', },
}
for l in ['e', 'mu', 'l']:
for M in _hadr.keys():
_process_tex = _hadr[M]['tex']+_tex[l]+r"^+\nu"
_process_taxonomy = r'Process :: $s$ hadron decays :: Semi-leptonic tree-level decays :: $K\to P\ell\nu$ :: $' + _process_tex + r"$"
_obs_name = "BR("+M+l+"nu)"
_obs = Observable(_obs_name)
_obs.set_description(r"Total branching ratio of $" + _process_tex + r"$")
_obs.tex = r"$\text{BR}(" + _process_tex + r")$"
_obs.add_taxonomy(_process_taxonomy)
Prediction(_obs_name, BR_tot_function(_hadr[M]['K'], _hadr[M]['P'], l))
for M in _hadr_lnC.keys():
_obs_name = "lnC("+M+l+"nu)"
_obs = Observable(_obs_name)
_obs.set_description(r"Effective scalar form factor in $" + _process_tex + r"$")
_obs.tex = r"$\ln(C)(" + _process_tex + r")$"
_obs.add_taxonomy(_process_taxonomy)
Prediction(_obs_name, logC_function(l))
_obs_name = "RT("+M+l+"nu)"
_obs = Observable(_obs_name)
_obs.set_description(r"Tensor coupling in $" + _process_tex + r"$")
_obs.tex = r"$R_T(" + _process_tex + r")$"
_obs.add_taxonomy(_process_taxonomy)
Prediction(_obs_name, RT_function(l)) |
73 | test sa dep virtual server sync devices | from functools import reduce
import operator
from django.contrib.auth.models import Group, Permission
from django.db.models import Q
from django.urls import reverse
from django.utils.crypto import get_random_string
from django.test import TestCase, override_settings
from accounts.models import APIToken, User
from .utils import force_dep_virtual_server
@override_settings(STATICFILES_STORAGE='django.contrib.staticfiles.storage.StaticFilesStorage')
class APIViewsTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.service_account = User.objects.create(
username=get_random_string(12),
email="{}@zentral.io".format(get_random_string(12)),
is_service_account=True
)
cls.user = User.objects.create_user("godzilla", "godzilla@zentral.io", get_random_string(12))
cls.group = Group.objects.create(name=get_random_string(12))
cls.service_account.groups.set([cls.group])
cls.user.groups.set([cls.group])
cls.api_key = APIToken.objects.update_or_create_for_user(cls.service_account)
# utility methods
def set_permissions(self, *permissions):
if permissions:
permission_filter = reduce(operator.or_, (
Q(content_type__app_label=app_label, codename=codename)
for app_label, codename in (
permission.split(".")
for permission in permissions
)
))
self.group.permissions.set(list(Permission.objects.filter(permission_filter)))
else:
self.group.permissions.clear()
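    # For example (codenames illustrative): set_permissions("mdm.view_depvirtualserver",
    # "mdm.add_depvirtualserver") builds the filter
    #   Q(content_type__app_label="mdm", codename="view_depvirtualserver")
    #   | Q(content_type__app_label="mdm", codename="add_depvirtualserver")
    # and assigns the matching Permission objects to the shared test group.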
def login(self, *permissions):
self.set_permissions(*permissions)
self.client.force_login(self.user)
def login_redirect(self, url):
response = self.client.get(url)
self.assertRedirects(response, "{u}?next={n}".format(u=reverse("login"), n=url))
def post(self, url, include_token=True):
kwargs = {}
if include_token:
kwargs["HTTP_AUTHORIZATION"] = f"Token {self.api_key}"
return self.client.post(url, **kwargs)
# dep_virtual_server_sync_devices
def METHOD_NAME(self):
dep_server = force_dep_virtual_server()
response = self.post(reverse("mdm_api:dep_virtual_server_sync_devices", args=(dep_server.pk,)),
include_token=False)
self.assertEqual(response.status_code, 401)
def test_sa_dep_virtual_server_sync_devices_permission_denied(self):
dep_server = force_dep_virtual_server()
response = self.post(reverse("mdm_api:dep_virtual_server_sync_devices", args=(dep_server.pk,)))
self.assertEqual(response.status_code, 403)
def test_sa_dep_virtual_server_sync_devices(self):
dep_server = force_dep_virtual_server()
self.set_permissions("mdm.view_depvirtualserver")
response = self.post(reverse("mdm_api:dep_virtual_server_sync_devices", args=(dep_server.pk,)))
self.assertEqual(response.status_code, 201)
self.assertEqual(sorted(response.json().keys()), ['task_id', 'task_result_url'])
def test_user_dep_virtual_server_sync_devices_unauthorized(self):
dep_server = force_dep_virtual_server()
response = self.client.post(reverse("mdm_api:dep_virtual_server_sync_devices", args=(dep_server.pk,)))
self.assertEqual(response.status_code, 401)
def test_user_dep_virtual_server_sync_devices_permission_denied(self):
dep_server = force_dep_virtual_server()
self.login()
response = self.client.post(reverse("mdm_api:dep_virtual_server_sync_devices", args=(dep_server.pk,)))
self.assertEqual(response.status_code, 403)
def test_user_dep_virtual_server_sync_devices(self):
dep_server = force_dep_virtual_server()
self.login("mdm.view_depvirtualserver")
response = self.client.post(reverse("mdm_api:dep_virtual_server_sync_devices", args=(dep_server.pk,)))
self.assertEqual(response.status_code, 201)
self.assertEqual(sorted(response.json().keys()), ['task_id', 'task_result_url']) |
74 | test compose support error msg | import asyncio
import concurrent.futures
import time
import timeit
import urllib.parse
from contextlib import contextmanager
from datetime import datetime
from urllib.parse import unquote_plus
import pytest
import yarl
from simcore_service_webserver.utils import (
DATETIME_FORMAT,
compose_support_error_msg,
compute_sha1_on_small_dataset,
now_str,
to_datetime,
)
from yarl import URL
def test_time_utils():
snapshot0 = now_str()
time.sleep(0.5)
snapshot1 = now_str()
now0 = to_datetime(snapshot0)
now1 = to_datetime(snapshot1)
assert now0 < now1
    # tests that the conversion is bijective
now_time = datetime.utcnow()
snapshot = now_time.strftime(DATETIME_FORMAT)
assert now_time == datetime.strptime(snapshot, DATETIME_FORMAT)
def test_yarl_url_compose_changed_with_latest_release():
    # TODO: add tests and do this upgrade carefully. Part of https://github.com/ITISFoundation/osparc-simcore/issues/2008
#
# With yarl=1.6.* failed tests/unit/isolated/test_director_api.py::test_director_workflow
#
    # Actually it is more consistent since
    # services/simcore%2Fservices%2Fdynamic%2Fsmash/1.0.3 is decomposed as [services, simcore%2Fservices%2Fdynamic%2Fsmash, 1.0.3]
#
api_endpoint = URL("http://director:8001/v0")
service_key = "simcore/services/dynamic/smash"
service_version = "1.0.3"
url = (
api_endpoint
/ "services"
/ urllib.parse.quote(service_key, safe="")
/ service_version
)
assert (
"/",
"v0",
"services",
service_key,
service_version,
) == url.parts, f"In yarl==1.5.1, this fails in {yarl.__version__}"
assert "simcore/services/dynamic/smash/1.0.3" == unquote_plus(
"simcore%2Fservices%2Fdynamic%2Fsmash/1.0.3"
)
assert (
urllib.parse.quote(service_key, safe="")
== "simcore%2Fservices%2Fdynamic%2Fsmash"
)
assert (
urllib.parse.quote_plus(service_key) == "simcore%2Fservices%2Fdynamic%2Fsmash"
)
@pytest.mark.skip(reason="DEV-demo")
async def test_compute_sha1_on_small_dataset(fake_project: dict):
# Based on GitHK review https://github.com/ITISFoundation/osparc-simcore/pull/2556:
# From what I know, these having function tend to be a bit CPU intensive, based on the size of the dataset.
# Could we maybe have an async version of this function here, run it on an executor?
#
# PC: Here we check the overhead of sha when adding a pool executor
@contextmanager
def timeit_ctx(what):
start = timeit.default_timer()
yield
stop = timeit.default_timer()
print(f"Time for {what}:", f"{stop - start} secs")
# dataset is N copies of a project dataset (typical dataset 'unit' in this module)
N = 10_000
data = [
fake_project,
] * N
print("-" * 100)
with timeit_ctx("compute_sha1 sync"):
project_sha2_sync = compute_sha1_on_small_dataset(data)
with timeit_ctx("compute_sha1 async"):
loop = asyncio.get_running_loop()
with concurrent.futures.ProcessPoolExecutor() as pool:
project_sha2_async = await loop.run_in_executor(
pool, compute_sha1_on_small_dataset, data
)
assert project_sha2_sync == project_sha2_async
# N=1
# Time for compute_sha1_sync: 3.153807483613491e-05 secs
# Time for compute_sha1_async: 0.03046882478520274 secs
# N=100
# Time for compute_sha1 sync: 0.0005367340054363012 secs
# Time for compute_sha1 async: 0.029975621961057186 secs
# N=1000
# Time for compute_sha1 sync: 0.005468853982165456 secs
# Time for compute_sha1 async: 0.04451707797124982 secs
# N=10000
# Time for compute_sha1 sync: 0.05151305114850402 secs
# Time for compute_sha1 async: 0.09799357503652573 secs
# For larger datasets, async solution definitvely scales better
# but for smaller ones, the overhead is considerable
def METHOD_NAME():
msg = compose_support_error_msg(
"first sentence for Mr.X \n Second sentence.",
error_code="OEC:139641204989600",
support_email="support@email.com",
)
assert (
msg == "First sentence for Mr.X. Second sentence."
" For more information please forward this message to support@email.com [OEC:139641204989600]"
) |
75 | test lan | #
# Copyright (C) 2010 Uninett AS
#
# This file is part of Network Administration Visualized (NAV).
#
# NAV is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 3 as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details. You should have received a copy of the GNU General Public
# License along with NAV. If not, see <http://www.gnu.org/licenses/>.
#
"""Unit tests for descrparser module."""
import unittest
from nav.ipdevpoll import descrparsers
class TestNtnuConvention(object):
sysname = 'foo-sw'
def METHOD_NAME(self):
d = descrparsers.parse_ntnu_convention(self.sysname, 'lan,math,staff')
assert d is not None
assert d['org'] == 'math'
assert d['usage'] == 'staff'
assert d['netident'] == 'math,staff'
def test_lan_with_comment_and_vlan(self):
d = descrparsers.parse_ntnu_convention(
self.sysname, 'lan,physics,students,campus_dragv,340'
)
        assert d is not None
assert d['org'] == 'physics'
assert d['usage'] == 'students'
assert d['comment'] == 'campus_dragv'
assert d['netident'] == 'physics,students,campus_dragv'
assert d['vlan'] == 340
def test_lan_with_numbered_usage_and_comment(self):
d = descrparsers.parse_ntnu_convention(
self.sysname, 'lan,math,staff12,campus_lade'
)
        assert d is not None
assert d['org'] == 'math'
assert d['usage'] == 'staff'
assert d['n'] == 12
assert d['netident'] == 'math,staff12,campus_lade'
assert d['comment'] == 'campus_lade'
def test_lan_with_spaces(self):
d = descrparsers.parse_ntnu_convention(
self.sysname, 'lan ,physics,students, campus_dragv, 340'
)
        assert d is not None
assert d['org'] == 'physics'
assert d['usage'] == 'students'
assert d['comment'] == 'campus_dragv'
assert d['netident'] == 'physics,students,campus_dragv'
assert d['vlan'] == 340
def test_lan_invalid(self):
d = descrparsers.parse_ntnu_convention(self.sysname, 'lan,foo')
assert d is None
def test_link(self):
d = descrparsers.parse_ntnu_convention(self.sysname, 'link,mts-gw')
        assert d is not None
assert d['to_router'] == 'mts-gw'
def test_link_with_comment_and_vlan(self):
d = descrparsers.parse_ntnu_convention(
self.sysname, 'link,moholt-gw,Tn_20022350,923'
)
assert d['to_router'] == 'moholt-gw'
assert d['comment'] == 'Tn_20022350'
assert d['netident'] == '%s,%s' % (self.sysname, 'moholt-gw')
assert d['vlan'] == 923
def test_core(self):
d = descrparsers.parse_ntnu_convention(self.sysname, 'core,it,wlan')
        assert d is not None
assert d['org'] == 'it'
assert d['usage'] == 'wlan'
assert d['netident'] == 'it,wlan'
def test_core_with_comment_and_vlan(self):
d = descrparsers.parse_ntnu_convention(self.sysname, 'core,it,fddi,manring,180')
        assert d is not None
assert d['org'] == 'it'
assert d['usage'] == 'fddi'
assert d['comment'] == 'manring'
assert d['netident'] == 'it,fddi,manring'
assert d['vlan'] == 180
def test_core_invalid(self):
d = descrparsers.parse_ntnu_convention(self.sysname, 'core,foo')
assert d is None
def test_elink(self):
d = descrparsers.parse_ntnu_convention(self.sysname, 'elink,trd-gw,uninett')
        assert d is not None
assert d['to_router'] == 'trd-gw'
assert d['to_org'] == 'uninett'
assert d['netident'] == '%s,%s' % (self.sysname, 'trd-gw')
def test_elink_with_empty_comment(self):
d = descrparsers.parse_ntnu_convention(
self.sysname, 'elink,sintef-gw,sintef,,902'
)
        assert d is not None
assert d['to_router'] == 'sintef-gw'
assert d['to_org'] == 'sintef'
assert not d['comment']
assert d['netident'] == '%s,%s' % (self.sysname, 'sintef-gw')
assert d['vlan'] == 902
def test_invalid(self):
d = descrparsers.parse_ntnu_convention(self.sysname, 'foobar,bar,baz')
assert d is None
class TestUninettConvention(object):
def test_simple(self):
d = descrparsers.parse_uninett_convention(
'foo-sw', 'lokal link, uninett-gw.teknobyen-gw2'
)
assert d['comment'] == 'lokal link'
assert d['netident'] == 'uninett-gw.teknobyen-gw2'
def test_invalid(self):
d = descrparsers.parse_uninett_convention('foo-sw', 'KX182')
assert d is None |
76 | run | import asyncio
import logging
import math
from asyncio import FIRST_COMPLETED
from gettext import gettext as _
from pulpcore.plugin.models import Artifact, ProgressReport, Remote
from pulpcore.plugin.stages import (
DeclarativeArtifact,
DeclarativeContent,
DeclarativeVersion,
Stage,
)
from pulp_ansible.app.constants import PAGE_SIZE
from pulp_ansible.app.models import AnsibleRepository, RoleRemote, Role
from pulp_ansible.app.tasks.utils import get_api_version, get_page_url, parse_metadata
log = logging.getLogger(__name__)
# The Github URL template to fetch a .tar.gz file from
GITHUB_URL = "https://github.com/%s/%s/archive/%s.tar.gz"
def synchronize(remote_pk, repository_pk, mirror=False):
"""
Sync content from the remote repository.
Create a new version of the repository that is synchronized with the remote.
Args:
remote_pk (str): The remote PK.
repository_pk (str): The repository PK.
mirror (bool): True for mirror mode, False for additive.
Raises:
ValueError: If the remote does not specify a URL to sync.
"""
remote = RoleRemote.objects.get(pk=remote_pk)
repository = AnsibleRepository.objects.get(pk=repository_pk)
if not remote.url:
raise ValueError(_("A remote must have a url specified to synchronize."))
log.info(
_("Synchronizing: repository=%(r)s remote=%(p)s"), {"r": repository.name, "p": remote.name}
)
first_stage = RoleFirstStage(remote)
d_version = DeclarativeVersion(first_stage, repository, mirror=mirror)
return d_version.create()
class RoleFirstStage(Stage):
"""
The first stage of a pulp_ansible sync pipeline for roles.
"""
def __init__(self, remote):
"""
The first stage of a pulp_ansible sync pipeline.
Args:
remote (RoleRemote): The remote data to be used when syncing
"""
super().__init__()
self.remote = remote
# Interpret download policy
self.deferred_download = self.remote.policy != Remote.IMMEDIATE
async def METHOD_NAME(self):
"""
Build and emit `DeclarativeContent` from the ansible metadata.
"""
async with ProgressReport(
message="Parsing Role Metadata", code="sync.parsing.metadata"
) as pb:
async for metadata in self._fetch_roles():
for version in metadata["summary_fields"]["versions"]:
url = GITHUB_URL % (
metadata["github_user"],
metadata["github_repo"],
version["name"],
)
role = Role(
version=version["name"],
name=metadata["name"],
namespace=metadata["namespace"],
)
relative_path = "%s/%s/%s.tar.gz" % (
metadata["namespace"],
metadata["name"],
version["name"],
)
d_artifact = DeclarativeArtifact(
artifact=Artifact(),
url=url,
relative_path=relative_path,
remote=self.remote,
deferred_download=self.deferred_download,
)
d_content = DeclarativeContent(content=role, d_artifacts=[d_artifact])
await pb.aincrement()
await self.put(d_content)
async def _fetch_roles(self):
async for metadata in self._fetch_galaxy_pages():
for result in metadata["results"]:
role = {
"name": result["name"],
"namespace": result["summary_fields"]["namespace"]["name"],
"summary_fields": result["summary_fields"], # needed for versions
"github_user": result["github_user"],
"github_repo": result["github_repo"],
}
yield role
async def _fetch_galaxy_pages(self):
"""
Fetch the roles in a remote repository.
Returns:
async generator: dicts that represent pages from galaxy api
"""
page_count = 0
remote = self.remote
progress_data = dict(
message="Parsing Pages from Galaxy Roles API", code="sync.parsing.roles"
)
async with ProgressReport(**progress_data) as progress_bar:
api_version = get_api_version(remote.url)
downloader = remote.get_downloader(url=get_page_url(remote.url, api_version))
metadata = parse_metadata(await downloader.METHOD_NAME())
page_count = math.ceil(float(metadata["count"]) / float(PAGE_SIZE))
progress_bar.total = page_count
await progress_bar.asave()
yield metadata
await progress_bar.aincrement()
# Concurrent downloads are limited by aiohttp...
not_done = set(
remote.get_downloader(url=get_page_url(remote.url, api_version, page)).METHOD_NAME()
for page in range(2, page_count + 1)
)
while not_done:
done, not_done = await asyncio.wait(not_done, return_when=FIRST_COMPLETED)
for item in done:
yield parse_metadata(item.result())
await progress_bar.aincrement() |
77 | test file upload file name with space | import os
from urllib.parse import urlparse
from django.core.files.storage import default_storage
from ....product.tests.utils import create_image
from ...tests.utils import (
assert_no_permission,
get_graphql_content,
get_multipart_request_body,
)
FILE_UPLOAD_MUTATION = """
mutation fileUpload($file: Upload!) {
fileUpload(file: $file) {
uploadedFile {
url
contentType
}
errors {
code
}
}
}
"""
def test_file_upload_by_staff(staff_api_client, site_settings, media_root):
# given
image_file, image_name = create_image()
variables = {"image": image_name}
body = get_multipart_request_body(
FILE_UPLOAD_MUTATION, variables, image_file, image_name
)
# when
response = staff_api_client.post_multipart(body)
# then
content = get_graphql_content(response)
data = content["data"]["fileUpload"]
errors = data["errors"]
assert not errors
assert data["uploadedFile"]["contentType"] == "image/jpeg"
file_name, format = os.path.splitext(image_file._name)
returned_url = data["uploadedFile"]["url"]
file_path = urlparse(returned_url).path
assert file_path.startswith(f"/media/file_upload/{file_name}")
assert file_path.endswith(format)
assert default_storage.exists(file_path.lstrip("/media"))
def test_file_upload_by_customer(user_api_client, media_root):
# given
image_file, image_name = create_image()
variables = {"image": image_name}
body = get_multipart_request_body(
FILE_UPLOAD_MUTATION, variables, image_file, image_name
)
# when
response = user_api_client.post_multipart(body)
# then
assert_no_permission(response)
def test_file_upload_by_app(app_api_client, media_root):
# given
image_file, image_name = create_image()
variables = {"image": image_name}
body = get_multipart_request_body(
FILE_UPLOAD_MUTATION, variables, image_file, image_name
)
# when
response = app_api_client.post_multipart(body)
# then
content = get_graphql_content(response)
data = content["data"]["fileUpload"]
errors = data["errors"]
assert not errors
assert data["uploadedFile"]["contentType"] == "image/jpeg"
file_name, format = os.path.splitext(image_file._name)
returned_url = data["uploadedFile"]["url"]
file_path = urlparse(returned_url).path
assert file_path.startswith(f"/media/file_upload/{file_name}")
assert file_path.endswith(format)
assert default_storage.exists(file_path.lstrip("/media"))
def test_file_upload_by_superuser(superuser_api_client, media_root):
# given
image_file, image_name = create_image()
variables = {"image": image_name}
body = get_multipart_request_body(
FILE_UPLOAD_MUTATION, variables, image_file, image_name
)
# when
response = superuser_api_client.post_multipart(body)
# then
content = get_graphql_content(response)
data = content["data"]["fileUpload"]
errors = data["errors"]
assert not errors
assert data["uploadedFile"]["contentType"] == "image/jpeg"
file_name, format = os.path.splitext(image_file._name)
returned_url = data["uploadedFile"]["url"]
file_path = urlparse(returned_url).path
assert file_path.startswith(f"/media/file_upload/{file_name}")
assert file_path.endswith(format)
assert default_storage.exists(file_path.lstrip("/media"))
def test_file_upload_file_with_the_same_name_already_exists(
staff_api_client, media_root, site_settings
):
"""Ensure that when the file with the same name as uploaded file,
already exists, the file name will be renamed and save as another file.
"""
# given
image_file1, image_name1 = create_image()
path = default_storage.save(image_file1._name, image_file1)
image_file, image_name = create_image()
assert image_file1 != image_file
assert image_name == image_name1
assert image_file._name == image_file1._name
variables = {"image": image_name}
body = get_multipart_request_body(
FILE_UPLOAD_MUTATION, variables, image_file, image_name
)
# when
response = staff_api_client.post_multipart(body)
# then
content = get_graphql_content(response)
data = content["data"]["fileUpload"]
errors = data["errors"]
domain = site_settings.site.domain
assert not errors
assert data["uploadedFile"]["contentType"] == "image/jpeg"
file_url = data["uploadedFile"]["url"]
assert file_url != f"http://{domain}/media/{image_file._name}"
assert file_url != f"http://{domain}/media/{path}"
assert default_storage.exists(file_url.replace(f"http://{domain}/media/", ""))
def METHOD_NAME(staff_api_client, media_root):
# given
image_file, image_name = create_image("file name with spaces")
variables = {"image": image_name}
body = get_multipart_request_body(
FILE_UPLOAD_MUTATION, variables, image_file, image_name
)
# when
response = staff_api_client.post_multipart(body)
# then
content = get_graphql_content(response)
data = content["data"]["fileUpload"]
errors = data["errors"]
assert not errors
assert data["uploadedFile"]["contentType"] == "image/jpeg"
file_name, format = os.path.splitext(image_file._name)
file_name = file_name.replace(" ", "_")
returned_url = data["uploadedFile"]["url"]
file_path = urlparse(returned_url).path
assert file_path.startswith(f"/media/file_upload/{file_name}")
assert file_path.endswith(format)
assert default_storage.exists(file_path.lstrip("/media"))
def test_file_upload_file_name_with_encoded_value(staff_api_client, media_root):
# given
image_file, image_name = create_image("file%20name")
variables = {"image": image_name}
body = get_multipart_request_body(
FILE_UPLOAD_MUTATION, variables, image_file, image_name
)
# when
response = staff_api_client.post_multipart(body)
# then
content = get_graphql_content(response)
data = content["data"]["fileUpload"]
errors = data["errors"]
assert not errors
assert data["uploadedFile"]["contentType"] == "image/jpeg"
file_name, format = os.path.splitext(image_file._name)
returned_url = data["uploadedFile"]["url"]
file_path = urlparse(returned_url).path
assert file_path.startswith(f"/media/file_upload/{file_name}")
assert file_path.endswith(format)
assert default_storage.exists(file_path.lstrip("/media")) |
78 | lru cached method | # Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import weakref
from functools import lru_cache, wraps
from sawtooth_validator.state.merkle import MerkleDatabase
from sawtooth_validator.state.merkle import INIT_ROOT_KEY
# Wrapper of lru_cache that works for instance methods
def METHOD_NAME(*lru_args, **lru_kwargs):
def decorator(wrapped_fn):
@wraps(wrapped_fn)
def wrapped(self, *args, **kwargs):
# Use a weak reference to self; this prevents a self-reference
# cycle that fools the garbage collector into thinking the instance
# shouldn't be dropped when all external references are dropped.
weak_ref_to_self = weakref.ref(self)
@wraps(wrapped_fn)
@lru_cache(*lru_args, **lru_kwargs)
def cached(*args, **kwargs):
return wrapped_fn(weak_ref_to_self(), *args, **kwargs)
setattr(self, wrapped_fn.__name__, cached)
return cached(*args, **kwargs)
return wrapped
return decorator
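# Usage sketch (the class below is hypothetical, not part of this module):
# each instance gets its own per-method LRU cache, and because only a weak
# reference to self is captured, the cache does not keep the instance alive.
#
#     class AddressBook:
#         @METHOD_NAME(maxsize=128)
#         def lookup(self, address):
#             return self._expensive_read(address)
#
#     book = AddressBook()
#     book.lookup('abc123')   # computed and cached on this instance
#     book.lookup('abc123')   # served from the per-instance cache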
class StateViewFactory:
"""The StateViewFactory produces StateViews for a particular merkle root.
This factory produces read-only views of a merkle tree. For a given
database, these views are considered immutable.
"""
def __init__(self, database):
"""Initializes the factory with a given database.
Args:
database (:obj:`Database`): the database containing the merkle
tree.
"""
self._database = database
@lru_cache()
def create_view(self, state_root_hash=None):
"""Creates a StateView for the given state root hash.
Args:
state_root_hash (str): The state root hash of the state view
                to return. If None, returns the state view for the initial
                (empty) merkle root, INIT_ROOT_KEY.
Returns:
StateView: state view locked to the given root hash.
"""
# Create a default Merkle database and if we have a state root hash,
# update the Merkle database's root to that
if state_root_hash is None:
state_root_hash = INIT_ROOT_KEY
merkle_db = MerkleDatabase(self._database,
merkle_root=state_root_hash)
return StateView(merkle_db)
class StateView:
"""The StateView provides read-only access to a particular merkle tree
root.
The StateView is a read-only view of a merkle tree. Access is limited to
available addresses, collections of leaf nodes, and specific leaf nodes.
    The view is locked to a single merkle root, effectively making it an
immutable snapshot.
"""
def __init__(self, tree):
"""Creates a StateView with a given merkle tree.
Args:
tree (:obj:`MerkleDatabase`): the merkle tree for this view
"""
self._tree = tree
@METHOD_NAME()
def get(self, address):
"""
Returns:
            bytes: the state entry at the given address
"""
return self._tree.get(address)
@METHOD_NAME()
def addresses(self):
"""
Returns:
list of str: the list of addresses available in this view
"""
return self._tree.addresses()
@METHOD_NAME()
def leaves(self, prefix):
"""
Args:
prefix (str): an address prefix under which to look for leaves
Returns:
dict of str,bytes: the state entries at the leaves
"""
return self._tree.leaves(prefix) |
79 | init attributes | ############################ Copyrights and license ############################
# #
# Copyright 2023 Mauricio Martinez <mauricio.martinez@premise.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from datetime import datetime
from typing import Any, Dict
from github.GithubObject import Attribute, NotSet
from github.PaginatedList import PaginatedList
from github.Repository import Repository
from github.Variable import Variable
class OrganizationVariable(Variable):
"""
    This class represents an organization-level GitHub variable. The reference can be found here: https://docs.github.com/en/rest/actions/variables
"""
def METHOD_NAME(self) -> None:
self._name: Attribute[str] = NotSet
self._created_at: Attribute[datetime] = NotSet
self._updated_at: Attribute[datetime] = NotSet
self._visibility: Attribute[str] = NotSet
self._selected_repositories: Attribute[PaginatedList[Repository]] = NotSet
self._selected_repositories_url: Attribute[str] = NotSet
self._url: Attribute[str] = NotSet
@property
def visibility(self) -> str:
"""
:type: string
"""
self._completeIfNotSet(self._visibility)
return self._visibility.value
@property
def selected_repositories(self) -> PaginatedList[Repository]:
return PaginatedList(
Repository,
self._requester,
self._selected_repositories_url.value,
None,
list_item="repositories",
)
def edit(
self,
value: str,
visibility: str = "all",
) -> bool:
"""
:calls: `PATCH /orgs/{org}/actions/variables/{variable_name} <https://docs.github.com/en/rest/reference/actions/variables#update-an-organization-variable>`_
:param variable_name: string
:param value: string
:param visibility: string
:rtype: bool
"""
assert isinstance(value, str), value
assert isinstance(visibility, str), visibility
patch_parameters: Dict[str, Any] = {
"name": self.name,
"value": value,
"visibility": visibility,
}
status, _, _ = self._requester.requestJson(
"PATCH",
f"{self.url}/actions/variables/{self.name}",
input=patch_parameters,
)
return status == 204
def add_repo(self, repo: Repository) -> bool:
"""
        :calls: `PUT {org_url}/actions/variables/{variable_name} <https://docs.github.com/en/rest/actions/variables#add-selected-repository-to-an-organization-secret>`_
:param repo: github.Repository.Repository
:rtype: bool
"""
if self.visibility != "selected":
return False
self._requester.requestJsonAndCheck("PUT", f"{self._selected_repositories_url.value}/{repo.id}")
return True
def remove_repo(self, repo: Repository) -> bool:
"""
        :calls: `DELETE {org_url}/actions/variables/{variable_name} <https://docs.github.com/en/rest/actions/variables#add-selected-repository-to-an-organization-secret>`_
:param repo: github.Repository.Repository
:rtype: bool
"""
if self.visibility != "selected":
return False
self._requester.requestJsonAndCheck("DELETE", f"{self._selected_repositories_url.value}/{repo.id}")
return True
def _useAttributes(self, attributes: Dict[str, Any]) -> None:
if "name" in attributes:
self._name = self._makeStringAttribute(attributes["name"])
if "created_at" in attributes:
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "updated_at" in attributes:
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "visibility" in attributes:
self._visibility = self._makeStringAttribute(attributes["visibility"])
if "selected_repositories_url" in attributes:
self._selected_repositories_url = self._makeStringAttribute(attributes["selected_repositories_url"])
if "url" in attributes:
self._url = self._makeStringAttribute(attributes["url"]) |
80 | test 04 delete applications fail | import time
import json
from flask import url_for
from portality import constants
from doajtest.fixtures import ApplicationFixtureFactory
from doajtest.helpers import DoajTestCase, with_es
from portality import models
from portality.api.current import ApplicationsBulkApi
from portality.api import Api401Error, Api400Error
class TestBulkApplication(DoajTestCase):
@with_es(indices=[models.Application.__type__, models.Journal.__type__])
def test_01_create_applications_success(self):
# set up all the bits we need - 10 applications
data = ApplicationFixtureFactory.incoming_application()
del data["admin"]["current_journal"]
dataset = [data] * 10
# create an account that we'll do the create as
account = models.Account()
account.set_id("test")
account.set_name("Tester")
account.set_email("test@test.com")
# call create on the object (which will save it to the index)
ids = ApplicationsBulkApi.create(dataset, account)
# check that we got the right number of ids back
assert len(ids) == 10
# let the index catch up
time.sleep(1)
# check that each id was actually created
for _id in ids:
s = models.Suggestion.pull(_id)
assert s is not None
@with_es(indices=[models.Application.__type__, models.Journal.__type__])
def test_02_create_applications_fail(self):
# if the account is dud
with self.assertRaises(Api401Error):
data = ApplicationFixtureFactory.incoming_application()
del data["admin"]["current_journal"]
dataset = [data] * 10
ids = ApplicationsBulkApi.create(dataset, None)
# check that the index is empty, as none of them should have been made
_all = [x for x in models.Suggestion.iterall()]
assert len(_all) == 0
# if the data is bust
with self.assertRaises(Api400Error):
account = models.Account()
account.set_id("test")
account.set_name("Tester")
account.set_email("test@test.com")
dataset = dataset[:5] + [{"some": {"junk": "data"}}] + dataset[5:]
ids = ApplicationsBulkApi.create(dataset, account)
# check that the index is empty, as none of them should have been made
_all = [x for x in models.Suggestion.iterall()]
assert len(_all) == 0
@with_es(indices=[models.Application.__type__, models.Journal.__type__, models.Lock.__type__])
def test_03_delete_application_success(self):
# set up all the bits we need
data = ApplicationFixtureFactory.incoming_application()
del data["admin"]["current_journal"]
dataset = [data] * 10
# create the account we're going to work as
account = models.Account()
account.set_id("test")
account.set_name("Tester")
account.set_email("test@test.com")
account.add_role("publisher")
# call create on the objects (which will save it to the index)
ids = ApplicationsBulkApi.create(dataset, account)
# let the index catch up
time.sleep(1)
# now delete half of them
dels = ids[:5]
ApplicationsBulkApi.delete(dels, account)
# let the index catch up
time.sleep(1)
for _id in dels:
ap = models.Suggestion.pull(_id)
assert ap is None
for _id in ids[5:]:
ap = models.Suggestion.pull(_id)
assert ap is not None
@with_es(indices=[models.Application.__type__, models.Journal.__type__])
def METHOD_NAME(self):
# set up all the bits we need
data = ApplicationFixtureFactory.incoming_application()
del data["admin"]["current_journal"]
dataset = [data] * 10
# create the account we're going to work as
account = models.Account()
account.set_id("test")
account.set_name("Tester")
account.set_email("test@test.com")
# call create on the objects (which will save it to the index)
ids = ApplicationsBulkApi.create(dataset, account)
# let the index catch up
time.sleep(1)
# call delete on the object in various context that will fail
# without an account
with self.assertRaises(Api401Error):
ApplicationsBulkApi.delete(ids, None)
# with the wrong account
account.set_id("other")
with self.assertRaises(Api400Error):
ApplicationsBulkApi.delete(ids, account)
# on the wrong id
ids.append("adfasdfhwefwef")
account.set_id("test")
with self.assertRaises(Api400Error):
ApplicationsBulkApi.delete(ids, account)
# on one with a disallowed workflow status
created = models.Suggestion.pull(ids[3])
created.set_application_status(constants.APPLICATION_STATUS_ACCEPTED)
created.save()
time.sleep(1)
with self.assertRaises(Api400Error):
ApplicationsBulkApi.delete(ids, account)
@with_es(indices=[models.Application.__type__, models.Journal.__type__, models.Account.__type__, models.Lock.__type__])
def test_05_test_via_endpoint(self):
""" Use a request context to test the API via the route """
# set up all the bits we need
data = ApplicationFixtureFactory.incoming_application()
del data["admin"]["current_journal"]
dataset = [data] * 10
# create the main account we're going to work as
account = models.Account()
account.set_id("test")
account.set_name("Tester")
account.set_email("test@test.com")
account.generate_api_key()
account.add_role('publisher')
account.add_role('api')
account.save()
# Add another user who doesn't own these articles
somebody_else = models.Account()
somebody_else.set_id("somebody_else")
somebody_else.set_name("Somebody Else")
somebody_else.set_email("somebodyelse@test.com")
somebody_else.generate_api_key()
somebody_else.add_role('publisher')
somebody_else.add_role('api')
somebody_else.save(blocking=True)
assert account.api_key != somebody_else.api_key
with self.app_test.test_request_context():
with self.app_test.test_client() as t_client:
# Create some new applications
resp = t_client.post(url_for('api_v3.bulk_application_create', api_key=account.api_key),
data=json.dumps(dataset))
assert resp.status_code == 201, resp.status_code
reply = json.loads(resp.data.decode("utf-8"))
assert len(reply) == len(dataset)
first_apl = reply.pop()
assert first_apl['status'] == 'created'
# Check we actually created new records
time.sleep(1.5)
assert len(models.Suggestion.all()) == len(dataset)
# Bulk delete
all_but_one = [new_art['id'] for new_art in reply]
resp = t_client.delete(url_for('api_v3.bulk_application_delete', api_key=account.api_key),
data=json.dumps(all_but_one))
assert resp.status_code == 204
time.sleep(1)
# we should have deleted all but one of the applications.
assert len(models.Suggestion.all()) == 1
# And our other user isn't allowed to delete the remaining one.
resp = t_client.delete(url_for('api_v3.bulk_application_delete', api_key=somebody_else.api_key),
data=json.dumps([first_apl['id']]))
assert resp.status_code == 400 |
81 | num points | import pytest
import numpy as np
from firedrake import *
from pyadjoint.tape import get_working_tape, pause_annotation
@pytest.fixture(autouse=True)
def handle_taping():
yield
tape = get_working_tape()
tape.clear_tape()
@pytest.fixture(autouse=True, scope="module")
def handle_annotation():
from firedrake.adjoint import annotate_tape, continue_annotation
if not annotate_tape():
continue_annotation()
yield
# Ensure annotations are paused when we finish.
annotate = annotate_tape()
if annotate:
pause_annotation()
@pytest.fixture(params=["sparse",
"per_cell",
"dense"])
def METHOD_NAME(request):
if request.param == "sparse":
return 2
elif request.param == "per_cell":
return 8
elif request.param == "dense":
return 1024
@pytest.mark.skipcomplex # Taping for complex-valued 0-forms not yet done
def test_poisson_inverse_conductivity(METHOD_NAME):
# Have to import inside test to make sure cleanup fixtures work as intended
from firedrake.adjoint import Control, ReducedFunctional, minimize
# Use pyadjoint to estimate an unknown conductivity in a
# poisson-like forward model from point measurements
m = UnitSquareMesh(2, 2)
if m.comm.size > 1:
# lower tolerance avoids issues with .at getting different results
# across ranks
m.tolerance = 1e-10
V = FunctionSpace(m, family='CG', degree=2)
Q = FunctionSpace(m, family='CG', degree=2)
# generate random "true" conductivity with beta distribution
pcg = PCG64(seed=0)
rg = RandomGenerator(pcg)
# beta distribution
q_true = rg.beta(Q, 1.0, 2.0)
# Compute the true solution of the PDE.
u_true = Function(V)
v = TestFunction(V)
f = Constant(1.0, domain=m)
k0 = Constant(0.5, domain=m)
bc = DirichletBC(V, 0, 'on_boundary')
F = (k0 * exp(q_true) * inner(grad(u_true), grad(v)) - f * v) * dx
solve(F == 0, u_true, bc)
# Generate random point cloud
np.random.seed(0)
xs = np.random.random_sample((METHOD_NAME, 2))
# we set redundant to False to ensure that we put points on all ranks
point_cloud = VertexOnlyMesh(m, xs, redundant=False)
# Check the point cloud coordinates are correct
assert (point_cloud.input_ordering.coordinates.dat.data_ro == xs).all()
# Generate "observed" data
generator = np.random.default_rng(0)
signal_to_noise = 20
U = u_true.dat.data_ro[:]
u_range = U.max() - U.min()
σ = Constant(u_range / signal_to_noise, domain=point_cloud)
ζ = generator.standard_normal(len(xs))
u_obs_vals = np.array(u_true.at(xs)) + float(σ) * ζ
# Store data on the point_cloud by setting input ordering dat
P0DG_input_ordering = FunctionSpace(point_cloud.input_ordering, 'DG', 0)
u_obs_input_ordering = Function(P0DG_input_ordering)
u_obs_input_ordering.dat.data_wo[:] = u_obs_vals
# Interpolate onto the point_cloud to get it in the right place
P0DG = FunctionSpace(point_cloud, 'DG', 0)
u_obs = Function(P0DG)
u_obs.interpolate(u_obs_input_ordering)
# Run the forward model
u = Function(V)
q = Function(Q)
bc = DirichletBC(V, 0, 'on_boundary')
F = (k0 * exp(q) * inner(grad(u), grad(v)) - f * v) * dx
solve(F == 0, u, bc)
# Two terms in the functional
misfit_expr = 0.5 * ((u_obs - interpolate(u, P0DG)) / σ)**2
α = Constant(0.5, domain=m)
regularisation_expr = 0.5 * α**2 * inner(grad(q), grad(q))
# Form functional and reduced functional
J = assemble(misfit_expr * dx) + assemble(regularisation_expr * dx)
q̂ = Control(q)
Ĵ = ReducedFunctional(J, q̂)
# Estimate q using Newton-CG which evaluates the hessian action
minimize(Ĵ, method='Newton-CG', options={'disp': True})
@pytest.mark.skipcomplex # Taping for complex-valued 0-forms not yet done
@pytest.mark.parallel
def test_poisson_inverse_conductivity_parallel(METHOD_NAME):
test_poisson_inverse_conductivity(METHOD_NAME) |
82 | serialize modbus pdu child | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from dataclasses import dataclass
from plc4py.api.messages.PlcMessage import PlcMessage
from plc4py.protocols.modbus.readwrite.ModbusPDU import ModbusPDU
from plc4py.protocols.modbus.readwrite.ModbusPDU import ModbusPDUBuilder
from plc4py.protocols.modbus.readwrite.ModbusPDUWriteFileRecordRequestItem import (
ModbusPDUWriteFileRecordRequestItem,
)
from plc4py.spi.generation.ReadBuffer import ReadBuffer
from plc4py.spi.generation.WriteBuffer import WriteBuffer
from sys import getsizeof
from typing import List
import math
@dataclass
class ModbusPDUWriteFileRecordRequest(PlcMessage, ModbusPDU):
items: List[ModbusPDUWriteFileRecordRequestItem]
# Accessors for discriminator values.
error_flag: bool = False
function_flag: int = 0x15
response: bool = False
def __post_init__(self):
super().__init__()
def METHOD_NAME(self, write_buffer: WriteBuffer):
write_buffer.push_context("ModbusPDUWriteFileRecordRequest")
# Implicit Field (byte_count) (Used for parsing, but its value is not stored as it's implicitly given by the objects content)
byte_count: int = int(getsizeof(self.items))
write_buffer.write_unsigned_byte(byte_count, logical_name="byteCount")
# Array Field (items)
write_buffer.write_complex_array(self.items, logical_name="items")
write_buffer.pop_context("ModbusPDUWriteFileRecordRequest")
def length_in_bytes(self) -> int:
return int(math.ceil(float(self.get_length_in_bits() / 8.0)))
def get_length_in_bits(self) -> int:
length_in_bits: int = super().get_length_in_bits()
_value: ModbusPDUWriteFileRecordRequest = self
# Implicit Field (byteCount)
length_in_bits += 8
# Array field
        if self.items is not None:
for element in self.items:
length_in_bits += element.get_length_in_bits()
return length_in_bits
@staticmethod
def static_parse_builder(read_buffer: ReadBuffer, response: bool):
read_buffer.push_context("ModbusPDUWriteFileRecordRequest")
byte_count: int = read_implicit_field("byteCount", read_unsigned_short)
        items = read_length_array_field(
"items",
DataReaderComplexDefault(
ModbusPDUWriteFileRecordRequestItem.static_parse(read_buffer),
read_buffer,
),
byte_count,
)
read_buffer.pop_context("ModbusPDUWriteFileRecordRequest")
# Create the instance
return ModbusPDUWriteFileRecordRequestBuilder(items)
def equals(self, o: object) -> bool:
if self == o:
return True
if not isinstance(o, ModbusPDUWriteFileRecordRequest):
return False
that: ModbusPDUWriteFileRecordRequest = ModbusPDUWriteFileRecordRequest(o)
return (self.items == that.items) and super().equals(that) and True
def hash_code(self) -> int:
return hash(self)
def __str__(self) -> str:
write_buffer_box_based: WriteBufferBoxBased = WriteBufferBoxBased(True, True)
try:
write_buffer_box_based.writeSerializable(self)
except SerializationException as e:
raise RuntimeException(e)
return "\n" + str(write_buffer_box_based.get_box()) + "\n"
@dataclass
class ModbusPDUWriteFileRecordRequestBuilder(ModbusPDUBuilder):
items: List[ModbusPDUWriteFileRecordRequestItem]
def __post_init__(self):
pass
def build(
self,
) -> ModbusPDUWriteFileRecordRequest:
modbus_pdu_write_file_record_request: ModbusPDUWriteFileRecordRequest = (
ModbusPDUWriteFileRecordRequest(self.items)
)
return modbus_pdu_write_file_record_request |
83 | validate file | #
# Copyright 2018-2023 Elyra Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
from typing import Dict
from typing import List
from typing import TypeVar
import nbformat
from traitlets.config import LoggingConfigurable
# Setup forward reference for type hint on return from class factory method. See
# https://stackoverflow.com/questions/39205527/can-you-annotate-return-type-when-value-is-instance-of-cls/39205612#39205612
F = TypeVar("F", bound="FileReader")
class FileReader(LoggingConfigurable):
"""
Base class for parsing a file for resources according to operation type. Subclasses set
their own parser member variable according to their implementation language.
"""
def __init__(self, filepath: str, **kwargs):
super().__init__(**kwargs)
self._filepath = filepath
@property
def filepath(self):
return self._filepath
@property
def language(self) -> str:
file_extension = os.path.splitext(self._filepath)[-1]
if file_extension == ".py":
return "python"
elif file_extension == ".r":
return "r"
else:
return None
def read_next_code_chunk(self) -> List[str]:
"""
Implements a generator for lines of code in the specified filepath. Subclasses
may override if explicit line-by-line parsing is not feasible, e.g. with Notebooks.
"""
with open(self._filepath) as f:
for line in f:
yield [line.strip()]
class NotebookReader(FileReader):
def __init__(self, filepath: str, **kwargs):
super().__init__(filepath, **kwargs)
with open(self._filepath) as f:
self._notebook = nbformat.read(f, as_version=4)
self._language = None
try:
self._language = self._notebook["metadata"]["kernelspec"]["language"].lower()
except KeyError:
self.log.warning(f"No language metadata found in {self._filepath}")
@property
def language(self) -> str:
return self._language
def read_next_code_chunk(self) -> List[str]:
for cell in self._notebook.cells:
if cell.source and cell.cell_type == "code":
yield cell.source.split("\n")
class ScriptParser(object):
"""
Base class for parsing individual lines of code. Subclasses implement a search_expressions()
function that returns language-specific regexes to match against code lines.
"""
_comment_char = "#"
def _get_line_without_comments(self, line):
if self._comment_char in line:
index = line.find(self._comment_char)
line = line[:index]
return line.strip()
def parse_environment_variables(self, line):
# Parse a line fed from file and match each regex in regex dictionary
line = self._get_line_without_comments(line)
if not line:
return []
matches = []
for key, value in self.search_expressions().items():
for pattern in value:
regex = re.compile(pattern)
for match in regex.finditer(line):
matches.append((key, match))
return matches
class PythonScriptParser(ScriptParser):
def search_expressions(self) -> Dict[str, List]:
# TODO: add more key:list-of-regex pairs to parse for additional resources
regex_dict = dict()
        # First regex matches envvar assignments of the form os.environ["name"] = value, with or without a value provided
        # Second regex matches envvar assignments that use os.getenv("name", "value"), with or without a default provided
        # Third regex matches envvar assignments that use os.environ.get("name", "value"), with or without a default provided
        # Both name and value are captured when possible
envs = [
r"os\.environ\[[\"']([a-zA-Z_]+[A-Za-z0-9_]*)[\"']\](?:\s*=(?:\s*[\"'](.[^\"']*)?[\"'])?)*",
r"os\.getenv\([\"']([a-zA-Z_]+[A-Za-z0-9_]*)[\"'](?:\s*\,\s*[\"'](.[^\"']*)?[\"'])?",
r"os\.environ\.get\([\"']([a-zA-Z_]+[A-Za-z0-9_]*)[\"'](?:\s*\,(?:\s*[\"'](.[^\"']*)?[\"'])?)*",
]
regex_dict["env_vars"] = envs
return regex_dict
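# Illustrative matches for the regexes above (hypothetical variable names):
#   os.environ["MY_VAR"] = "value"   -> captures ("MY_VAR", "value")
#   os.getenv("MY_VAR", "default")   -> captures ("MY_VAR", "default")
#   os.environ.get("MY_VAR")         -> captures ("MY_VAR", None)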
class RScriptParser(ScriptParser):
def search_expressions(self) -> Dict[str, List]:
# TODO: add more key:list-of-regex pairs to parse for additional resources
regex_dict = dict()
# Tests for matches of the form Sys.setenv("key" = "value")
envs = [
r"Sys\.setenv\([\"']*([a-zA-Z_]+[A-Za-z0-9_]*)[\"']*\s*=\s*[\"']*(.[^\"']*)?[\"']*\)",
r"Sys\.getenv\([\"']*([a-zA-Z_]+[A-Za-z0-9_]*)[\"']*\)(.)*",
]
regex_dict["env_vars"] = envs
return regex_dict
class ContentParser(LoggingConfigurable):
parsers = {"python": PythonScriptParser(), "r": RScriptParser()}
def parse(self, filepath: str) -> dict:
"""Returns a model dictionary of all the regex matches for each key in the regex dictionary"""
properties = {"env_vars": {}, "inputs": [], "outputs": []}
reader = self._get_reader(filepath)
parser = self._get_parser(reader.language)
if not parser:
return properties
for chunk in reader.read_next_code_chunk():
if chunk:
for line in chunk:
matches = parser.parse_environment_variables(line)
for key, match in matches:
if key == "env_vars":
properties[key][match.group(1)] = match.group(2)
else:
properties[key].append(match.group(1))
return properties
def METHOD_NAME(self, filepath: str):
"""
Validate file exists and is file (e.g. not a directory)
"""
if not os.path.exists(filepath):
raise FileNotFoundError(f"No such file or directory: {filepath}")
if not os.path.isfile(filepath):
raise IsADirectoryError(f"Is a directory: {filepath}")
def _get_reader(self, filepath: str):
"""
Find the proper reader based on the file extension
"""
file_extension = os.path.splitext(filepath)[-1]
self.METHOD_NAME(filepath)
if file_extension == ".ipynb":
return NotebookReader(filepath)
elif file_extension in [".py", ".r"]:
return FileReader(filepath)
else:
raise ValueError(f"File type {file_extension} is not supported.")
def _get_parser(self, language: str):
"""
Find the proper parser based on content language
"""
parser = None
if language:
parser = self.parsers.get(language)
if not parser:
self.log.warning(f"Content parser for {language} is not available.")
return parser |
84 | count | import io
import json
import os
import time
import requests
from PIL import Image
from requests.adapters import HTTPAdapter
from module.base.utils import save_image
from module.config.config import AzurLaneConfig
from module.exception import ScriptError
from module.logger import logger
from module.statistics.utils import pack
class DropImage:
def __init__(self, stat, genre, save, upload, info=''):
"""
Args:
stat (AzurStats):
genre:
save:
upload:
"""
self.stat = stat
self.genre = str(genre)
self.save = bool(save)
self.upload = bool(upload)
self.info = info
self.images = []
def add(self, image):
"""
Args:
image (np.ndarray):
"""
if self:
self.images.append(image)
logger.info(f'Drop record added, genre={self.genre}, amount={self.METHOD_NAME}')
def handle_add(self, main, before=None):
"""
Handle wait before and after adding screenshot.
Args:
main (ModuleBase):
before (int, float, tuple): Sleep before adding.
"""
if before is None:
before = main.config.WAIT_BEFORE_SAVING_SCREEN_SHOT
if self:
main.handle_info_bar()
main.device.sleep(before)
main.device.screenshot()
self.add(main.device.image)
def clear(self):
self.images = []
@property
def METHOD_NAME(self):
return len(self.images)
def __bool__(self):
return self.save or self.upload
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self:
self.stat.commit(images=self.images, genre=self.genre, save=self.save, upload=self.upload, info=self.info)
class AzurStats:
TIMEOUT = 20
def __init__(self, config):
"""
Args:
config (AzurLaneConfig):
"""
self.config = config
@property
def _api(self):
method = self.config.DropRecord_API
if method == 'default':
return 'https://azurstats.lyoko.io/api/upload/'
elif method == 'cn_gz_reverse_proxy':
return 'https://service-rjfzwz8i-1301182309.gz.apigw.tencentcs.com/api/upload'
elif method == 'cn_sh_reverse_proxy':
return 'https://service-nlvjetab-1301182309.sh.apigw.tencentcs.com/api/upload'
else:
logger.critical('Invalid upload API, please check your settings')
raise ScriptError('Invalid upload API')
@property
def _user_agent(self):
return f'Alas ({str(self.config.DropRecord_AzurStatsID)})'
def _upload(self, image, genre, filename):
"""
Args:
image: Image to upload.
genre (str):
filename (str): 'xxx.png'
Returns:
bool: If success
"""
output = io.BytesIO()
Image.fromarray(image, mode='RGB').save(output, format='png')
output.seek(0)
data = {'file': (filename, output, 'image/png')}
headers = {'user-agent': self._user_agent}
session = requests.Session()
session.trust_env = False
session.mount('http://', HTTPAdapter(max_retries=5))
session.mount('https://', HTTPAdapter(max_retries=5))
try:
resp = session.post(self._api, files=data, headers=headers, timeout=self.TIMEOUT)
except Exception as e:
logger.warning(f'Image upload failed, {e}')
return False
if resp.status_code == 200:
# print(resp.text)
info = json.loads(resp.text)
code = info.get("code", 500)
if code == 200:
logger.info(f'Image upload success, imgid: {info.get("imgid", "")}')
return True
elif code == 0:
logger.warning(f'Image upload failed, msg: {info.get("msg", "")}')
return False
logger.warning(f'Image upload failed, unexpected server returns, '
f'status_code: {resp.status_code}, returns: {resp.text}')
return False
def _save(self, image, genre, filename):
"""
Args:
image: Image to save.
genre (str): Name of sub folder.
filename (str): 'xxx.png'
Returns:
bool: If success
"""
try:
folder = os.path.join(str(self.config.DropRecord_SaveFolder), genre)
os.makedirs(folder, exist_ok=True)
file = os.path.join(folder, filename)
save_image(image, file)
logger.info(f'Image save success, file: {file}')
return True
except Exception as e:
logger.exception(e)
return False
def commit(self, images, genre, save=False, upload=False, info=''):
"""
Args:
images (list): List of images in numpy array.
genre (str):
save (bool): If save image to local file system.
upload (bool): If upload image to Azur Stats.
info (str): Extra info append to filename.
Returns:
bool: If commit.
"""
if len(images) == 0:
return False
save, upload = bool(save), bool(upload)
logger.info(f'Drop record commit, genre={genre}, amount={len(images)}, save={save}, upload={upload}')
image = pack(images)
now = int(time.time() * 1000)
if info:
filename = f'{now}_{info}.png'
else:
filename = f'{now}.png'
if save:
self._save(image, genre=genre, filename=filename)
if upload:
self._upload(image, genre=genre, filename=filename)
return True
def new(self, genre, method='do_not', info=''):
"""
Args:
genre (str):
method (str): The method about save and upload image.
info (str): Extra info append to filename.
Returns:
DropImage:
"""
save = 'save' in method
upload = 'upload' in method
return DropImage(stat=self, genre=genre, save=save, upload=upload, info=info) |
85 | remove punc | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# The following code has been taken from
# https://github.com/facebookresearch/DPR, which is CC-BY-NC 4.0
# licensed as of now. More details on the license can be found
# at https://github.com/facebookresearch/DPR/blob/master/LICENSE
"""
Set of utilities for Q&A results validation tasks - Retriever passage
validation and Reader predicted answer validation
"""
import collections
import logging
import string
import unicodedata
from functools import partial
from multiprocessing import Pool as ProcessPool
from typing import Tuple, List, Dict
import regex as re
from tasks.orqa.unsupervised.tokenizers import SimpleTokenizer
logger = logging.getLogger(__name__)
QAMatchStats = collections.namedtuple('QAMatchStats', ['top_k_hits',\
'questions_doc_hits'])
def calculate_matches(all_docs: Dict[object, Tuple[str, str]],
answers: List[List[str]], closest_docs: List[Tuple[List[object],
List[float]]], workers_num: int, match_type: str) -> QAMatchStats:
"""
Evaluates answers presence in the set of documents. This function is
supposed to be used with a large collection of documents and results.
It internally forks multiple sub-processes for evaluation and then
merges results
:param all_docs: dictionary of the entire documents database.
doc_id -> (doc_text, title)
    :param answers: list of answer lists, one list per question
    :param closest_docs: document ids of the top results along with their
        scores
    :param workers_num: number of parallel worker processes used to process the data
    :param match_type: type of answer matching. Refer to has_answer code for
        available options
    :return: matching information tuple.
    top_k_hits - a list where the index is the number of top documents considered
    and the value is the total number of questions with a valid match across the
    entire dataset.
questions_doc_hits - more detailed info with answer matches for every
question and every retrieved document
"""
global dpr_all_documents
dpr_all_documents = all_docs
tok_opts = {}
tokenizer = SimpleTokenizer(**tok_opts)
processes = ProcessPool(
processes=workers_num,
)
logger.info('Matching answers in top docs...')
get_score_partial = partial(check_answer, match_type=match_type,
tokenizer=tokenizer)
questions_answers_docs = zip(answers, closest_docs)
scores = processes.map(get_score_partial, questions_answers_docs)
logger.info('Per question validation results len=%d', len(scores))
n_docs = len(closest_docs[0][0])
top_k_hits = [0] * n_docs
for question_hits in scores:
best_hit = next((i for i, x in enumerate(question_hits) if x), None)
if best_hit is not None:
top_k_hits[best_hit:] = [v + 1 for v in top_k_hits[best_hit:]]
return QAMatchStats(top_k_hits, scores)
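# Worked example (illustrative numbers): with 2 questions and 3 retrieved docs
# each, per-question hit lists of [False, True, True] and [True, True, True]
# give best hits at ranks 1 and 0, so top_k_hits == [1, 2, 2]: one question is
# answered within the top-1 documents and both within the top-2.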
def check_answer(questions_answers_docs, tokenizer, match_type) -> List[bool]:
"""
Search through all the top docs to see if they have any of the answers.
"""
answers, (doc_ids, doc_scores) = questions_answers_docs
global dpr_all_documents
hits = []
for i, doc_id in enumerate(doc_ids):
doc = dpr_all_documents[doc_id]
text = doc[0]
answer_found = False
if text is None: # cannot find the document for some reason
logger.warning("no doc in db")
hits.append(False)
continue
if has_answer(answers, text, tokenizer, match_type):
answer_found = True
hits.append(answer_found)
return hits
def has_answer(answers, text, tokenizer, match_type) -> bool:
"""
Check if a document contains an answer string.
If `match_type` is string, token matching is done between the text
and answer.
If `match_type` is regex, we search the whole text with the regex.
"""
text = _normalize(text)
if match_type == 'string':
# Answer is a list of possible strings
text = tokenizer.tokenize(text).words(uncased=True)
for single_answer in answers:
single_answer = _normalize(single_answer)
single_answer = tokenizer.tokenize(single_answer)
single_answer = single_answer.words(uncased=True)
for i in range(0, len(text) - len(single_answer) + 1):
if single_answer == text[i: i + len(single_answer)]:
return True
elif match_type == 'regex':
# Answer is a regex
for single_answer in answers:
single_answer = _normalize(single_answer)
if regex_match(text, single_answer):
return True
return False
def regex_match(text, pattern):
"""Test if a regex pattern is contained within a text."""
try:
pattern = re.compile(
pattern,
flags=re.IGNORECASE + re.UNICODE + re.MULTILINE,
)
except BaseException:
return False
return pattern.search(text) is not None
# function for the reader model answer validation
def exact_match_score(prediction, ground_truth):
return _normalize_answer(prediction) == _normalize_answer(ground_truth)
def _normalize_answer(s):
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def METHOD_NAME(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(METHOD_NAME(lower(s))))
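# For illustration, _normalize_answer("The  Answer!") lowercases the text,
# strips punctuation and articles, and collapses whitespace, yielding "answer".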
def _normalize(text):
return unicodedata.normalize('NFD', text) |
86 | get imported resource | from typing import List, Tuple
class ImportStatement:
"""Represent an import in a module
`readonly` attribute controls whether this import can be changed
by import actions or not.
"""
def __init__(
self, import_info, start_line, end_line, main_statement=None, blank_lines=0
):
self.start_line = start_line
self.end_line = end_line
self.readonly = False
self.main_statement = main_statement
self._import_info = None
self.import_info = import_info
self._is_changed = False
self.new_start = None
self.blank_lines = blank_lines
def _get_import_info(self):
return self._import_info
def _set_import_info(self, new_import):
if (
not self.readonly
and new_import is not None
and not new_import == self._import_info
):
self._is_changed = True
self._import_info = new_import
import_info = property(_get_import_info, _set_import_info)
def get_import_statement(self):
if self._is_changed or self.main_statement is None:
return self.import_info.get_import_statement()
else:
return self.main_statement
def empty_import(self):
self.import_info = ImportInfo.get_empty_import()
def move(self, lineno, blank_lines=0):
self.new_start = lineno
self.blank_lines = blank_lines
def get_old_location(self):
return self.start_line, self.end_line
def get_new_start(self):
return self.new_start
def is_changed(self):
return self._is_changed or (
self.new_start is not None or self.new_start != self.start_line
)
def accept(self, visitor):
return visitor.dispatch(self)
class ImportInfo:
def get_imported_primaries(self, context):
pass
def get_imported_names(self, context):
return [
primary.split(".")[0] for primary in self.get_imported_primaries(context)
]
def get_import_statement(self):
pass
def is_empty(self):
pass
def __hash__(self):
return hash(self.get_import_statement())
def _are_name_and_alias_lists_equal(self, list1, list2):
if len(list1) != len(list2):
return False
for pair1, pair2 in zip(list1, list2):
if pair1 != pair2:
return False
return True
def __eq__(self, obj):
return (
isinstance(obj, self.__class__)
and self.get_import_statement() == obj.get_import_statement()
)
def __ne__(self, obj):
return not self.__eq__(obj)
@staticmethod
def get_empty_import():
return EmptyImport()
class NormalImport(ImportInfo):
def __init__(self, names_and_aliases):
self.names_and_aliases = names_and_aliases
def get_imported_primaries(self, context):
result = []
for name, alias in self.names_and_aliases:
if alias:
result.append(alias)
else:
result.append(name)
return result
def get_import_statement(self):
result = "import "
for name, alias in self.names_and_aliases:
result += name
if alias:
result += " as " + alias
result += ", "
return result[:-2]
def is_empty(self):
return len(self.names_and_aliases) == 0
class FromImport(ImportInfo):
def __init__(self, module_name, level, names_and_aliases):
self.module_name = module_name
self.level = level
self.names_and_aliases = names_and_aliases
def get_imported_primaries(self, context):
if self.names_and_aliases[0][0] == "*":
module = self.get_imported_module(context)
return [name for name in module if not name.startswith("_")]
result = []
for name, alias in self.names_and_aliases:
if alias:
result.append(alias)
else:
result.append(name)
return result
def METHOD_NAME(self, context):
"""Get the imported resource
Returns `None` if module was not found.
"""
if self.level == 0:
return context.project.find_module(self.module_name, folder=context.folder)
else:
return context.project.find_relative_module(
self.module_name, context.folder, self.level
)
def get_imported_module(self, context):
"""Get the imported `PyModule`
Raises `rope.base.exceptions.ModuleNotFoundError` if module
could not be found.
"""
if self.level == 0:
return context.project.get_module(self.module_name, context.folder)
else:
return context.project.get_relative_module(
self.module_name, context.folder, self.level
)
def get_import_statement(self):
result = "from " + "." * self.level + self.module_name + " import "
for name, alias in self.names_and_aliases:
result += name
if alias:
result += " as " + alias
result += ", "
return result[:-2]
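    # For illustration (hypothetical module and names):
    # FromImport("utils", 0, [("helper", None), ("Thing", "T")]) renders as
    # "from utils import helper, Thing as T"; with level=1 it would render as
    # "from .utils import helper, Thing as T".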
def is_empty(self):
return len(self.names_and_aliases) == 0
def is_star_import(self):
return len(self.names_and_aliases) > 0 and self.names_and_aliases[0][0] == "*"
class EmptyImport(ImportInfo):
names_and_aliases: List[Tuple[str, str]] = []
def is_empty(self):
return True
def get_imported_primaries(self, context):
return []
class ImportContext:
def __init__(self, project, folder):
self.project = project
self.folder = folder |
87 | get contents | #!/bin/true
#
# util.py - part of autospec
# Copyright (C) 2015 Intel Corporation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import hashlib
import os
import re
import shlex
import subprocess
import sys
dictionary_filename = os.path.dirname(__file__) + "/translate.dic"
dictionary = [line.strip() for line in open(dictionary_filename, 'r')]
os_paths = None
def call(command, logfile=None, check=True, **kwargs):
"""Subprocess.call convenience wrapper."""
returncode = 1
full_args = {
"args": shlex.split(command),
"universal_newlines": True,
}
full_args.update(kwargs)
if logfile:
full_args["stdout"] = open(logfile, "w")
full_args["stderr"] = subprocess.STDOUT
returncode = subprocess.call(**full_args)
full_args["stdout"].close()
else:
returncode = subprocess.call(**full_args)
if check and returncode != 0:
raise subprocess.CalledProcessError(returncode, full_args["args"], None)
return returncode
def _file_write(self, s):
s = s.strip()
if not s.endswith("\n"):
s += "\n"
self.write(s)
def translate(package):
"""Convert terms to their alternate definition."""
global dictionary
for item in dictionary:
if item.startswith(package + "="):
return item.split("=")[1]
return package
def do_regex(patterns, re_str):
"""Find a match in multiple patterns."""
for p in patterns:
match = re.search(p, re_str)
if match:
return match
def METHOD_NAME(filename):
"""Get contents of filename."""
with open(filename, "rb") as f:
return f.read()
return None
def get_sha1sum(filename):
"""Get sha1 sum of filename."""
sh = hashlib.sha1()
sh.update(METHOD_NAME(filename))
return sh.hexdigest()
def _supports_color():
# FIXME: check terminfo instead
return sys.stdout.isatty()
def _print_message(message, level, color=None):
prefix = level
if color and _supports_color():
# FIXME: use terminfo instead
if color == 'red':
params = '31;1'
elif color == 'green':
params = '32;1'
elif color == 'yellow':
params = '33;1'
elif color == 'blue':
params = '34;1'
prefix = f'\033[{params}m{level}\033[0m'
print(f'[{prefix}] {message}')
def print_error(message):
"""Print error, color coded for TTYs."""
_print_message(message, 'ERROR', 'red')
def print_fatal(message):
"""Print fatal error, color coded for TTYs."""
_print_message(message, 'FATAL', 'red')
def print_warning(message):
"""Print warning, color coded for TTYs."""
_print_message(message, 'WARNING', 'red')
def print_info(message):
"""Print informational message, color coded for TTYs."""
_print_message(message, 'INFO', 'yellow')
def print_success(message):
"""Print success message, color coded for TTYs."""
_print_message(message, 'SUCCESS', 'green')
def binary_in_path(binary):
"""Determine if the given binary exists in the provided filesystem paths."""
global os_paths
if not os_paths:
os_paths = os.getenv("PATH", default="/usr/bin:/bin").split(os.pathsep)
for path in os_paths:
if os.path.exists(os.path.join(path, binary)):
return True
return False
def write_out(filename, content, mode="w"):
"""File.write convenience wrapper."""
with open_auto(filename, mode) as require_f:
require_f.write(content)
def open_auto(*args, **kwargs):
"""Open a file with UTF-8 encoding.
Open file with UTF-8 encoding and "surrogate" escape characters that are
not valid UTF-8 to avoid data corruption.
"""
# 'encoding' and 'errors' are fourth and fifth positional arguments, so
# restrict the args tuple to (file, mode, buffering) at most
assert len(args) <= 3
assert 'encoding' not in kwargs
assert 'errors' not in kwargs
return open(*args, encoding="utf-8", errors="surrogateescape", **kwargs) |
88 | disable | # encoding:utf-8
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import logging
import os
import urllib.error
import urllib.parse
from gi.repository import Gtk
from gi.repository import GLib
from xl import common, event, providers, settings
from xl.nls import gettext as _
from xlgui import panel
from . import preferences
import gi
try:
gi.require_version('WebKit2', '4.1')
except:
gi.require_version('WebKit2', '4.0')
from gi.repository import WebKit2
log = logging.getLogger(__name__)
# fmt: off
LANGUAGES = ["ab", "aa", "af", "ak", "sq", "am", "ar", "an", "hy", "as", "av",
"ae", "ay", "az", "bm", "ba", "eu", "be", "bn", "bh", "bi", "bs", "br", "bg",
"my", "ca", "ch", "ce", "ny", "cv", "kw", "co", "cr", "hr", "cs", "da", "dv",
"nl", "dz", "en", "eo", "et", "ee", "fo", "fj", "fi", "fr", "ff", "gl", "ka",
"de", "el", "gn", "gu", "ht", "ha", "he", "hz", "hi", "ho", "hu", "ia", "id",
"ie", "ga", "ig", "ik", "io", "is", "it", "iu", "jv", "kl", "kn", "kr", "kk",
"km", "ki", "rw", "ky", "kv", "kg", "kj", "la", "lb", "lg", "li", "ln", "lo",
"lt", "lv", "gv", "mk", "mg", "ml", "mt", "mi", "mr", "mh", "mn", "na", "nv",
"nb", "nd", "ne", "ng", "nn", "no", "ii", "nr", "oc", "oj", "cu", "om", "or",
"os", "pi", "fa", "pl", "ps", "pt", "qu", "rm", "rn", "ro", "ru", "sa", "sc",
"se", "sm", "sg", "sr", "gd", "sn", "si", "sk", "sl", "so", "st", "es", "su",
"sw", "ss", "sv", "ta", "te", "th", "ti", "bo", "tk", "tl", "tn", "to", "tr",
"ts", "tw", "ty", "uk", "ur", "ve", "vi", "vk", "vo", "wa", "cy", "wo", "fy",
"xh", "yi", "yo", "za", "zu"]
# fmt: on
class WikipediaPlugin:
__exaile = None
__wiki_panel = None
def enable(self, exaile):
self.__exaile = exaile
def METHOD_NAME(self, _exaile):
providers.unregister('main-panel', self.__wiki_panel)
self.__wiki_panel.destroy()
self.__exaile = None
self.__wiki_panel = None
def on_gui_loaded(self):
user_agent = self.__exaile.get_user_agent_string('wikipedia')
self.__wiki_panel = WikiPanel(self.__exaile.gui.main.window, user_agent)
providers.register('main-panel', self.__wiki_panel)
def get_preferences_pane(self):
return preferences
plugin_class = WikipediaPlugin
class BrowserPage(WebKit2.WebView):
def __init__(self, builder, user_agent):
WebKit2.WebView.__init__(self)
self.hometrack = None
self.__user_agent = user_agent
builder.connect_signals(self)
event.add_callback(self.on_playback_start, 'playback_track_start')
def destroy(self):
event.remove_callback(self.on_playback_start, 'playback_track_start')
def on_playback_start(self, type, player, track):
self.hometrack = track
self.load_wikipedia_page(track)
def on_home_button_clicked(self, button):
if self.hometrack is not None:
self.load_wikipedia_page(self.hometrack)
def on_refresh_button_clicked(self, button):
self.reload()
def on_back_button_clicked(self, button):
self.go_back()
def on_forward_button_clicked(self, button):
self.go_forward()
@common.threaded
def load_wikipedia_page(self, track):
if track != self.hometrack:
return
artist = track.get_tag_display('artist')
language = settings.get_option('plugin/wikipedia/language', 'en')
if language not in LANGUAGES:
log.error('Provided language "%s" not found.' % language)
language = 'en'
artist = urllib.parse.quote(artist.encode('utf-8'), '')
url = "https://%s.m.wikipedia.org/wiki/Special:Search/%s" % (language, artist)
try:
html = common.get_url_contents(url, self.__user_agent)
if not isinstance(html, str):
html = html.decode("utf-8")
except urllib.error.URLError as e:
log.error(e)
log.error(
"Error occurred when trying to retrieve Wikipedia page "
"for %s." % artist
)
html = (
"""
<p style="color: red">No Wikipedia page found for <strong>%s</strong></p>
"""
% artist
)
GLib.idle_add(self.load_html, html, url)
class WikiPanel(panel.Panel):
# Specifies the path to the UI file and the name of the root element
ui_info = (os.path.dirname(__file__) + "/data/wikipanel.ui", 'WikiPanel')
def __init__(self, parent, user_agent):
panel.Panel.__init__(self, parent, 'wikipedia', _('Wikipedia'))
self.parent = parent
self._browser = BrowserPage(self.builder, user_agent)
self.setup_widgets()
def destroy(self):
self._browser.destroy()
def setup_widgets(self):
self._scrolled_window = Gtk.ScrolledWindow()
self._scrolled_window.add(self._browser)
frame = self.builder.get_object('rendering_frame')
self._scrolled_window.show_all()
frame.add(self._scrolled_window) |
89 | change position |
from django.apps.registry import apps
from django.db.models import Q
from rest_framework.decorators import action
from rest_framework.response import Response
from drfautoapi.drfautoapi import ApiViewSetConstrutor, \
customize, wrapper_queryset_response_for_drf_action
from sapl.api.permissions import SaplModelPermissions
from sapl.materia.models import TipoMateriaLegislativa, Tramitacao,\
MateriaLegislativa, Proposicao
ApiViewSetConstrutor.build_class(
[
apps.get_app_config('materia')
]
)
@customize(Proposicao)
class _ProposicaoViewSet:
"""
list:
        Returns the list of Proposições (proposals)
        * Permissions:
            * Owner user:
                * Can list all of their own Proposições
            * Logged-in or anonymous user:
                * Can list all incorporated Proposições
    retrieve:
        Returns a proposição identified by 'id'
        * Permissions:
            * Owner user:
                * Can retrieve any of their own Proposições
            * Logged-in or anonymous user:
                * Can retrieve any of the incorporated proposições
"""
class ProposicaoPermission(SaplModelPermissions):
def has_permission(self, request, view):
if request.method == 'GET':
return True
                # if the request is list or detail, skip the permission check
                # and let get_queryset filter according to the visibility rule
                # for proposições, that is:
                # 1. an incorporated proposição is a public proposição
                # 2. a non-incorporated one can only be seen by its author
else:
perm = super().has_permission(request, view)
return perm
            # not list or detail, so it goes through the permission rules and,
            # after that, it still goes through the get_queryset filter
permission_classes = (ProposicaoPermission,)
def get_queryset(self):
qs = super().get_queryset()
q = Q(data_recebimento__isnull=False, object_id__isnull=False)
if not self.request.user.is_anonymous:
autor_do_usuario_logado = self.request.user.autor_set.first()
# se usuário logado é operador de algum autor
if autor_do_usuario_logado:
q = Q(autor=autor_do_usuario_logado)
# se é operador de protocolo, ve qualquer coisa enviada
if self.request.user.has_perm('protocoloadm.list_protocolo'):
q = Q(data_envio__isnull=False) | Q(
data_devolucao__isnull=False)
qs = qs.filter(q)
return qs
@customize(MateriaLegislativa)
class _MateriaLegislativaViewSet:
class Meta:
ordering = ['-ano', 'tipo', 'numero']
@action(detail=True, methods=['GET'])
def ultima_tramitacao(self, request, *args, **kwargs):
materia = self.get_object()
if not materia.tramitacao_set.exists():
return Response({})
ultima_tramitacao = materia.tramitacao_set.order_by(
'-data_tramitacao', '-id').first()
serializer_class = ApiViewSetConstrutor.get_viewset_for_model(
Tramitacao).serializer_class(ultima_tramitacao)
return Response(serializer_class.data)
@action(detail=True, methods=['GET'])
def anexadas(self, request, *args, **kwargs):
self.queryset = self.get_object().anexadas.all()
return self.list(request, *args, **kwargs)
@customize(TipoMateriaLegislativa)
class _TipoMateriaLegislativaViewSet:
@action(detail=True, methods=['POST'])
def METHOD_NAME(self, request, *args, **kwargs):
result = {
'status': 200,
'message': 'OK'
}
d = request.data
if 'pos_ini' in d and 'pos_fim' in d:
if d['pos_ini'] != d['pos_fim']:
pk = kwargs['pk']
TipoMateriaLegislativa.objects.reposicione(pk, d['pos_fim'])
return Response(result) |
90 | test loaders convert context to values | """
Tests for salt.loader.lazy
"""
import sys
import pytest
import salt.loader
import salt.loader.context
import salt.loader.lazy
import salt.utils.files
@pytest.fixture
def loader_dir(tmp_path):
"""
Create a simple directory with a couple modules to load and run tests
against.
"""
mod_contents = """
def __virtual__():
return True
def set_context(key, value):
__context__[key] = value
def get_context(key):
return __context__[key]
"""
with pytest.helpers.temp_file(
"mod_a.py", directory=tmp_path, contents=mod_contents
), pytest.helpers.temp_file("mod_b.py", directory=tmp_path, contents=mod_contents):
yield str(tmp_path)
def test_loaders_have_uniq_context(loader_dir):
"""
Loaded functions run in the LazyLoader's context.
"""
opts = {"optimization_order": [0, 1, 2]}
loader_1 = salt.loader.lazy.LazyLoader([loader_dir], opts)
loader_2 = salt.loader.lazy.LazyLoader([loader_dir], opts)
loader_1._load_all()
loader_2._load_all()
assert loader_1.pack["__context__"] == {}
assert loader_2.pack["__context__"] == {}
loader_1["mod_a.set_context"]("foo", "bar")
assert loader_1.pack["__context__"] == {"foo": "bar"}
assert loader_1["mod_b.get_context"]("foo") == "bar"
with pytest.raises(KeyError):
loader_2["mod_a.get_context"]("foo")
assert loader_2.pack["__context__"] == {}
def test_loaded_methods_are_loaded_func(loader_dir):
"""
Functions loaded from LazyLoader's item lookups are LoadedFunc objects
"""
opts = {"optimization_order": [0, 1, 2]}
loader_1 = salt.loader.lazy.LazyLoader([loader_dir], opts)
fun = loader_1["mod_a.get_context"]
assert isinstance(fun, salt.loader.lazy.LoadedFunc)
def test_loaded_modules_are_loaded_mods(loader_dir):
"""
Modules looked up as attributes of LazyLoaders are LoadedMod objects.
"""
opts = {"optimization_order": [0, 1, 2]}
loader_1 = salt.loader.lazy.LazyLoader([loader_dir], opts)
mod = loader_1.mod_a
assert isinstance(mod, salt.loader.lazy.LoadedMod)
def test_loaders_create_named_loader_contexts(loader_dir):
"""
    LazyLoaders create NamedLoaderContexts on the modules they load.
"""
opts = {"optimization_order": [0, 1, 2]}
loader_1 = salt.loader.lazy.LazyLoader([loader_dir], opts)
mod = loader_1.mod_a
assert isinstance(mod.mod, str)
func = mod.set_context
assert isinstance(func, salt.loader.lazy.LoadedFunc)
module_name = func.func.__module__
module = sys.modules[module_name]
assert isinstance(module.__context__, salt.loader.context.NamedLoaderContext)
wrapped_module_name = func.__module__
wrapped_module = sys.modules[wrapped_module_name]
assert isinstance(
wrapped_module.__context__, salt.loader.context.NamedLoaderContext
)
assert module is wrapped_module
def METHOD_NAME(loader_dir):
"""
LazyLoaders convert NamedLoaderContexts to values when instantiated.
"""
loader_context = salt.loader.context.LoaderContext()
grains_default = {
"os": "linux",
}
grains = salt.loader.context.NamedLoaderContext(
"grains", loader_context, grains_default
)
opts = {
"optimization_order": [0, 1, 2],
"grains": grains,
}
loader_1 = salt.loader.lazy.LazyLoader([loader_dir], opts)
assert loader_1.opts["grains"] == grains_default
# The loader's opts is a copy
assert opts["grains"] == grains
def test_missing_loader_from_salt_internal_loaders():
with pytest.raises(RuntimeError):
salt.loader._module_dirs(
{"extension_modules": "/tmp/foo"}, "missingmodules", "module"
)
def test_loader_pack_always_has_opts(loader_dir):
loader = salt.loader.lazy.LazyLoader([loader_dir], opts={"foo": "bar"})
assert "__opts__" in loader.pack
assert "foo" in loader.pack["__opts__"]
assert loader.pack["__opts__"]["foo"] == "bar"
def test_loader_pack_opts_not_overwritten(loader_dir):
opts = {"foo": "bar"}
loader = salt.loader.lazy.LazyLoader(
[loader_dir],
opts={"foo": "bar"},
pack={"__opts__": {"baz": "bif"}},
)
assert "__opts__" in loader.pack
assert "foo" not in loader.pack["__opts__"]
assert "baz" in loader.pack["__opts__"]
assert loader.pack["__opts__"]["baz"] == "bif" |
91 | config boolean | """Provides the code to load PRAW's configuration file ``praw.ini``."""
from __future__ import annotations
import configparser
import os
import sys
from pathlib import Path
from threading import Lock
from .exceptions import ClientException
class _NotSet:
def __bool__(self) -> bool:
return False
__nonzero__ = __bool__
def __str__(self) -> str:
return "NotSet"
class Config:
"""A class containing the configuration for a Reddit site."""
CONFIG = None
CONFIG_NOT_SET = _NotSet() # Represents a config value that is not set.
LOCK = Lock()
INTERPOLATION_LEVEL = {
"basic": configparser.BasicInterpolation,
"extended": configparser.ExtendedInterpolation,
}
@staticmethod
def METHOD_NAME(item: bool | str) -> bool: # noqa: ANN001
if isinstance(item, bool):
return item
return item.lower() in {"1", "yes", "true", "on"}
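    # For illustration: METHOD_NAME("Yes") and METHOD_NAME(True) return True,
    # while METHOD_NAME("off") and METHOD_NAME("0") return False.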
@classmethod
def _load_config(cls, *, config_interpolation: str | None = None): # noqa: ANN001
"""Attempt to load settings from various praw.ini files."""
if config_interpolation is not None:
interpolator_class = cls.INTERPOLATION_LEVEL[config_interpolation]()
else:
interpolator_class = None
config = configparser.ConfigParser(interpolation=interpolator_class)
module_dir = Path(sys.modules[__name__].__file__).parent
if "APPDATA" in os.environ: # Windows
os_config_path = Path(os.environ["APPDATA"])
elif "XDG_CONFIG_HOME" in os.environ: # Modern Linux
os_config_path = Path(os.environ["XDG_CONFIG_HOME"])
elif "HOME" in os.environ: # Legacy Linux
os_config_path = Path(os.environ["HOME"]) / ".config"
else:
os_config_path = None
locations = [str(module_dir / "praw.ini"), "praw.ini"]
if os_config_path is not None:
locations.insert(1, str(os_config_path / "praw.ini"))
config.read(locations)
cls.CONFIG = config
@property
def short_url(self) -> str:
"""Return the short url.
:raises: :class:`.ClientException` if it is not set.
"""
if self._short_url is self.CONFIG_NOT_SET:
msg = "No short domain specified."
raise ClientException(msg)
return self._short_url
def __init__(
self,
site_name: str,
config_interpolation: str | None = None,
**settings: str,
):
"""Initialize a :class:`.Config` instance."""
with Config.LOCK:
if Config.CONFIG is None:
self._load_config(config_interpolation=config_interpolation)
self._settings = settings
self.custom = dict(Config.CONFIG.items(site_name), **settings)
self.client_id = self.client_secret = self.oauth_url = None
self.reddit_url = self.refresh_token = self.redirect_uri = None
self.password = self.user_agent = self.username = None
self._initialize_attributes()
def _fetch(self, key): # noqa: ANN001
value = self.custom[key]
del self.custom[key]
return value
def _fetch_default(self, key, *, default=None): # noqa: ANN001
if key not in self.custom:
return default
return self._fetch(key)
def _fetch_or_not_set(self, key): # noqa: ANN001
if key in self._settings: # Passed in values have the highest priority
return self._fetch(key)
env_value = os.getenv(f"praw_{key}")
ini_value = self._fetch_default(key) # Needed to remove from custom
# Environment variables have higher priority than praw.ini settings
return env_value or ini_value or self.CONFIG_NOT_SET
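    # Illustrative precedence for the lookup above (the values are assumed,
    # not part of PRAW): with Config("bot", user_agent="cli-agent") and an
    # environment variable praw_user_agent="env-agent", the keyword argument
    # "cli-agent" wins; without it, "env-agent" beats any praw.ini entry, and
    # the praw.ini value is used only when neither of the other two is set.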
def _initialize_attributes(self): # noqa: ANN001
self._short_url = self._fetch_default("short_url") or self.CONFIG_NOT_SET
self.check_for_async = self.METHOD_NAME(
self._fetch_default("check_for_async", default=True)
)
self.check_for_updates = self.METHOD_NAME(
self._fetch_or_not_set("check_for_updates")
)
self.warn_comment_sort = self.METHOD_NAME(
self._fetch_default("warn_comment_sort", default=True)
)
self.warn_additional_fetch_params = self.METHOD_NAME(
self._fetch_default("warn_additional_fetch_params", default=True)
)
self.kinds = {
x: self._fetch(f"{x}_kind")
for x in [
"comment",
"message",
"redditor",
"submission",
"subreddit",
"trophy",
]
}
for attribute in (
"client_id",
"client_secret",
"redirect_uri",
"refresh_token",
"password",
"user_agent",
"username",
):
setattr(self, attribute, self._fetch_or_not_set(attribute))
for required_attribute in (
"oauth_url",
"ratelimit_seconds",
"reddit_url",
"timeout",
):
setattr(self, required_attribute, self._fetch(required_attribute))
for attribute, conversion in {
"ratelimit_seconds": int,
"timeout": int,
}.items():
try:
setattr(self, attribute, conversion(getattr(self, attribute)))
except ValueError:
msg = f"An incorrect config type was given for option {attribute}. The expected type is {conversion.__name__}, but the given value is {getattr(self, attribute)}."
raise ValueError(msg) from None |
92 | job count | """
Job scheduler
Dispatch jobs with each submitted calculation
Common methods:
has_results(): returns whether there are any results available
results(): return an iterator yielding ( identifier, result ) tuples
submit(target, args = (), kwargs = {}): submits job, return identifier
is_empty(): returns whether there are no more jobs or results waiting
is_full(): returns whether the number of currently processing jobs is at maximum
shutdown(): no more job is submitted
resume(): continues to process waiting jobs
join(): shutdown, and finish processing all currently running jobs
terminate(): kills all processing
Scheduler methods:
job_count(): return number of unfinished jobs (waiting + running)
process_count(): return number of running processes
"""
from __future__ import absolute_import, division, print_function
import time
from collections import deque
from six.moves.queue import Empty
from libtbx.scheduling import result
from libtbx.scheduling import identifier
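# Minimal usage sketch (illustrative only; job_factory and queue_factory are
# assumed to come from the surrounding libtbx.scheduling machinery, e.g. a
# process factory and a queue factory, and res() is assumed to return the
# computed value or re-raise the stored exception):
#
#   mgr = creator(job_factory, queue_factory, capacity=limited(njobs=4)).create()
#   jobid = mgr.submit(target=my_function, args=(1, 2))
#   for (jid, res) in mgr.results():
#       value = res()
#   mgr.join()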
# Capacity modes
class limited(object):
"""
Limited number of jobs
"""
def __init__(self, njobs):
self.njobs = njobs
def is_full(self, njobs):
return self.njobs <= njobs
def reduce_capacity_if_possible(self, target = None):
if target is None or target >= self.njobs:
target = self.njobs - 1
if target > 0:
self.njobs = target
return True # success
else:
self.njobs = 1
return False
class unlimited(object):
"""
  Unlimited number of jobs (to be used with a submission queue).
Note: this is a singleton object
"""
@staticmethod
def is_full(njobs):
return False
def job_cycle(outqueue, jobid, target, args, kwargs):
try:
value = target( *args, **kwargs )
except Exception as e:
res = result.error( exception = e, traceback = result.get_traceback_info() )
else:
res = result.success( value = value )
outqueue.put( ( jobid, res ) )
class manager(object):
"""
Job scheduler
"""
def __init__(self, inqueue, job_factory, capacity, waittime = 0.01):
self.inqueue = inqueue
self.job_factory = job_factory
self.capacity = capacity
self.waittime = waittime
self.process_data_for = {}
self.waiting_results = set()
self.waiting_jobs = deque()
self.completed_results = deque()
self.resume()
def METHOD_NAME(self):
return len( self.process_data_for ) + len( self.waiting_jobs )
def process_count(self):
return len( self.process_data_for )
def is_empty(self):
return not (
self.waiting_jobs or self.process_data_for or self.waiting_results
or self.completed_results
)
def is_full(self):
return self.capacity.is_full( njobs = self.process_count() )
def has_results(self):
return self.completed_results
def results(self):
self.poll()
while (
self.process_data_for or self.waiting_results or self.completed_results
or ( self.waiting_jobs and self.active )
):
while not self.has_results():
self.wait()
self.poll()
yield self.completed_results.popleft()
def submit(self, target, args = (), kwargs = {}):
jobid = identifier()
self.waiting_jobs.append( ( jobid, target, args, kwargs ) )
self.poll()
return jobid
def shutdown(self):
self.active = False
def resume(self):
self.active = True
def join(self):
while self.process_data_for:
self.poll()
self.wait()
self.poll()
def terminate(self):
self.shutdown()
for process in self.process_data_for.values():
if process.is_alive():
if hasattr( process, "terminate" ): # Thread has no terminate
try:
process.terminate()
except Exception:
pass
self.join()
# Internal methods
def wait(self):
time.sleep( self.waittime )
def poll(self):
# Check existing jobs
for jobid in list(self.process_data_for):
process = self.process_data_for[ jobid ]
if not process.is_alive():
self.finish_job( jobid = jobid )
# Collect results
while True:
try:
( jobid, res ) = self.inqueue.get( timeout = self.waittime )
except Empty:
break
if jobid in self.process_data_for:
self.finish_job( jobid = jobid )
self.waiting_results.remove( jobid )
self.completed_results.append( ( jobid, res ) )
# Submit new jobs
while ( not self.capacity.is_full( njobs = self.process_count() )
and self.waiting_jobs and self.active ):
( jobid, target, args, kwargs ) = self.waiting_jobs.popleft()
process = self.job_factory(
target = job_cycle,
args = ( self.inqueue, jobid, target, args, kwargs ),
)
try:
process.start()
except Exception as e:
# It will crash if process cannot start. See if we can just reduce
# capacity
if hasattr(self.capacity, 'reduce_capacity_if_possible'):
ok = self.capacity.reduce_capacity_if_possible(
target = self.process_count())
if ok:
continue # back to top
raise Exception(e) # Process could not start
self.process_data_for[ jobid ] = process
def finish_job(self, jobid):
process = self.process_data_for[ jobid ]
process.join()
exit_code = getattr( process, "exitcode", 0 ) # Thread has no "exitcode" attribute
if exit_code != 0:
res = result.error(
exception = result.get_exception( process = process, exit_code = exit_code ),
traceback = result.get_crash_info( process = process ),
)
self.completed_results.append( ( jobid, res ) )
else:
self.waiting_results.add( jobid )
del self.process_data_for[ jobid ]
class creator(object):
"""
Information to create and destroy manager
"""
def __init__(self, job_factory, queue_factory, capacity, waittime = 0.01):
self.job_factory = job_factory
self.queue_factory = queue_factory
self.capacity = capacity
self.waittime = waittime
def create(self):
return manager(
inqueue = self.queue_factory.create(),
job_factory = self.job_factory,
capacity = self.capacity,
waittime = self.waittime,
)
def destroy(self, manager):
manager.terminate()
manager.join()
self.queue_factory.destroy( manager.inqueue ) |
93 | field2utext | ##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Converters
Used by `ZPublisher.HTTPRequest` and `OFS.PropertyManager`.
Binary converters (i.e. converters which use `bytes` for/in their result)
are marked with a true `binary` attribute.
This allows the publisher to perform the conversion to `bytes`
based on its more precise encoding knowledge.
"""
import html
import json
import re
import warnings
from DateTime import DateTime
from DateTime.interfaces import SyntaxError
# This may get overwritten during configuration
default_encoding = 'utf-8'
def field2string(v):
"""Converts value to string."""
if isinstance(v, bytes):
return v.decode(default_encoding)
else:
return str(v)
def field2bytes(v):
"""Converts value to bytes."""
if hasattr(v, 'read'):
return v.read()
elif isinstance(v, str):
return v.encode(default_encoding)
else:
return bytes(v)
field2bytes.binary = True
def field2text(value, nl=re.compile('\r\n|\n\r').search):
value = field2string(value)
match_object = nl(value)
if match_object is None:
return value
length = match_object.start(0)
result = []
start = 0
while length >= start:
result.append(value[start:length])
start = length + 2
match_object = nl(value, start)
if match_object is None:
length = -1
else:
length = match_object.start(0)
result.append(value[start:])
return '\n'.join(result)
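# For example, field2text('a\r\nb\n\rc') is expected to return 'a\nb\nc',
# i.e. both "\r\n" and "\n\r" sequences are normalized to "\n".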
def field2required(v):
v = field2string(v)
if v.strip():
return v
raise ValueError('No input for required field<p>')
def field2int(v):
if isinstance(v, (list, tuple)):
return list(map(field2int, v))
v = field2string(v)
if v:
try:
return int(v)
except ValueError:
raise ValueError(
"An integer was expected in the value %r" % html.escape(
v, quote=True
)
)
raise ValueError('Empty entry when <strong>integer</strong> expected')
def field2float(v):
if isinstance(v, (list, tuple)):
return list(map(field2float, v))
v = field2string(v)
if v:
try:
return float(v)
except ValueError:
raise ValueError(
"A floating-point number was expected in the value %r" %
html.escape(v, True)
)
raise ValueError(
'Empty entry when <strong>floating-point number</strong> expected')
def field2long(v):
if isinstance(v, (list, tuple)):
return list(map(field2long, v))
v = field2string(v)
# handle trailing 'L' if present.
if v[-1:] in ('L', 'l'):
v = v[:-1]
if v:
try:
return int(v)
except ValueError:
raise ValueError(
"A long integer was expected in the value %r" %
html.escape(
v, True))
raise ValueError('Empty entry when <strong>integer</strong> expected')
def field2tokens(v):
v = field2string(v)
return v.split()
def field2lines(v):
if isinstance(v, (list, tuple)):
return [field2string(item) for item in v]
return field2string(v).splitlines()
def field2date(v):
v = field2string(v)
try:
v = DateTime(v)
except SyntaxError:
raise SyntaxError("Invalid DateTime " + html.escape(repr(v), True))
return v
def field2date_international(v):
v = field2string(v)
try:
v = DateTime(v, datefmt="international")
except SyntaxError:
raise SyntaxError("Invalid DateTime " + html.escape(repr(v)))
return v
def field2boolean(v):
if v == 'False':
return False
return bool(v)
def field2ustring(v):
warnings.warn(
"The converter `(field2)ustring` is deprecated "
"and will be removed in Zope 6. "
"Please use `(field2)string` instead.",
DeprecationWarning)
return field2string(v)
def field2utokens(v):
warnings.warn(
"The converter `(field2)utokens` is deprecated "
"and will be removed in Zope 6. "
"Please use `(field2)tokens` instead.",
DeprecationWarning)
return field2tokens(v)
def METHOD_NAME(v):
warnings.warn(
"The converter `(field2)utext` is deprecated "
"and will be removed in Zope 6. "
"Please use `(field2)text` instead.",
DeprecationWarning)
return field2text(v)
def field2ulines(v):
warnings.warn(
"The converter `(field2u)lines` is deprecated "
"and will be removed in Zope 6. "
"Please use `(field2)lines` instead.",
DeprecationWarning)
return field2lines(v)
def field2json(v):
try:
v = json.loads(v)
except ValueError:
raise ValueError("Invalid json " + html.escape(repr(v), True))
return v
type_converters = {
'float': field2float,
'int': field2int,
'long': field2long,
'string': field2string,
'bytes': field2bytes,
'date': field2date,
'date_international': field2date_international,
'json': field2json,
'required': field2required,
'tokens': field2tokens,
'lines': field2lines,
'text': field2text,
'boolean': field2boolean,
'ustring': field2ustring,
'utokens': field2utokens,
'ulines': field2ulines,
'utext': METHOD_NAME,
}
get_converter = type_converters.get |
94 | schema read permission | # -*- coding: utf-8 -*-
#
# This file is part of CERN Analysis Preservation Framework.
# Copyright (C) 2016 CERN.
#
# CERN Analysis Preservation Framework is free software; you can redistribute
# it and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# CERN Analysis Preservation Framework is distributed in the hope that it will
# be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CERN Analysis Preservation Framework; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Utils for Schemas module."""
import re
from functools import wraps
from flask import abort, jsonify
from invenio_jsonschemas.errors import JSONSchemaNotFound
from jsonpatch import JsonPatchConflict
from .models import Schema
from .permissions import AdminSchemaPermission, ReadSchemaPermission
def is_later_version(version1, version2):
matched1 = re.match(r"(\d+)\.(\d+)\.(\d+)", version1)
matched2 = re.match(r"(\d+)\.(\d+)\.(\d+)", version2)
if not matched1 or not matched2:
raise ValueError(
'Version has to be passed as string <major>.<minor>.<patch>'
)
    # Compare components numerically; comparing the matched strings would
    # rank e.g. "10" below "9".
    major1, minor1, patch1 = (int(part) for part in matched1.groups())
    major2, minor2, patch2 = (int(part) for part in matched2.groups())
if major1 > major2:
return True
elif major1 < major2:
return False
elif major1 == major2:
if minor1 > minor2:
return True
elif minor1 < minor2:
return False
elif minor1 == minor2:
if patch1 > patch2:
return True
elif patch1 < patch2:
return False
elif patch1 == patch2:
return False
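# Illustrative expectations (not part of the original module):
#   is_later_version("2.0.0", "1.9.9") -> True
#   is_later_version("1.2.3", "1.2.3") -> False
#   is_later_version("1.2", "1.0.0") raises ValueError, since the version
#   string must match <major>.<minor>.<patch>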
def check_allowed_patch_operation(data):
"""Return patch data after filtering in allowed operations."""
ALLOWED_OPERATIONS = ['add', 'remove', 'replace']
if not data or not isinstance(data, list):
return None
try:
data = [
operation
for operation in data
if operation.get('op', '') in ALLOWED_OPERATIONS
]
return data
except AttributeError:
return None
def check_allowed_patch_path(data):
"""Raise JsonPatchConflict for patching non-editable fields."""
EDITABLE_FIELDS = [
'fullname',
'use_deposit_as_record',
'deposit_mapping',
'record_mapping',
'deposit_options',
'record_options',
'config',
]
if not data:
return data
for operation in data:
_field = operation.get('path', '')
if re.match(r'^/', _field):
_field = _field.split('/')
else:
raise JsonPatchConflict
try:
if _field[1] not in EDITABLE_FIELDS:
raise JsonPatchConflict
except IndexError:
raise JsonPatchConflict
return data
def actions_from_type(_type, perms):
"""
Get user-made action names depending on the type.
When the type is record or deposit, the user should also
get schema-read access.
"""
if _type == "record":
return [f"record-schema-{perm}" for perm in perms]
elif _type == "deposit":
return [f"deposit-schema-{perm}" for perm in perms]
elif _type == "schema":
return [f"schema-object-{perm}" for perm in perms]
def get_default_mapping(name, version):
mapping_name = f"{name}-v{version}"
default_mapping = {"mappings": {}}
    collection_mapping = {
"properties": {
"_collection": {
"type": "object",
"properties": {
"fullname": {"type": "keyword"},
"name": {"type": "keyword"},
"version": {"type": "keyword"},
},
}
}
}
    default_mapping["mappings"][mapping_name] = collection_mapping
return default_mapping
def pass_schema(f):
"""Decorator to check if schema exists by name and/or version."""
@wraps(f)
def wrapper(*args, **kwargs):
name = kwargs.get('name')
version = kwargs.get('version')
if name:
try:
if version:
schema = Schema.get(name, version)
else:
schema = Schema.get_latest(name)
kwargs['schema'] = schema
except JSONSchemaNotFound:
return (
jsonify(
{
'message': 'Schema not found. Please try '
'again with existing schemas.'
}
),
404,
)
return f(*args, **kwargs)
return wrapper
def pass_schema_versions(f):
"""Decorator to return all schema by name."""
@wraps(f)
def wrapper(*args, **kwargs):
name = kwargs.get('name')
if name:
try:
schemas = Schema.get_all_versions(name)
kwargs['schemas'] = schemas
except (JSONSchemaNotFound, IndexError):
return (
jsonify(
{
'message': 'Schema not found. Please try '
'again with existing schemas.'
}
),
404,
)
return f(*args, **kwargs)
return wrapper
def schema_admin_permission(f):
"""Decorator to check if user has admin permission."""
@wraps(f)
def wrapper(
self=None, name=None, version=None, schema=None, *args, **kwargs
):
if not AdminSchemaPermission(schema).can():
abort(403)
return f(
self=self,
name=name,
version=version,
schema=schema,
*args,
**kwargs,
)
return wrapper
def METHOD_NAME(f):
"""Decorator to check if user has read permission."""
@wraps(f)
def wrapper(
self=None, name=None, version=None, schema=None, *args, **kwargs
):
if not ReadSchemaPermission(schema).can():
abort(403)
return f(
self=self,
name=name,
version=version,
schema=schema,
*args,
**kwargs,
)
return wrapper |
95 | update releases | import logging
import shutil
import tkinter as tk
import tkinter.messagebox  # required for the tk.messagebox dialogs used below
import webbrowser
from tkinter import ttk
from modlunky2.config import Config
from modlunky2.ui.widgets import ToolTip
from modlunky2.utils import open_directory
logger = logging.getLogger(__name__)
class ControlsFrame(ttk.Frame):
def __init__(self, parent, play_tab, modlunky_config: Config, *args, **kwargs):
logger.debug("Initializing Playlunky ControlsFrame")
super().__init__(parent, *args, **kwargs)
self.parent = parent
self.modlunky_config = modlunky_config
self.play_tab = play_tab
self.columnconfigure(0, weight=1)
self.refresh_button = ttk.Button(
self, text="Refresh Mods", command=self.refresh_mods
)
self.refresh_button.grid(row=0, column=0, pady=3, padx=10, sticky="nswe")
ToolTip(
self.refresh_button,
(
"If you've made any changes in the Packs directory\n"
"that you want updated in the mod list."
),
)
self.open_packs_button = ttk.Button(
self, text="Open Packs Directory", command=self.open_packs
)
self.open_packs_button.grid(row=1, column=0, pady=3, padx=10, sticky="nswe")
ToolTip(self.open_packs_button, ("Open the directory where Packs are saved"))
self.guide_button = ttk.Button(self, text="User Guide", command=self.guide)
self.guide_button.grid(row=2, column=0, pady=3, padx=10, sticky="nswe")
ToolTip(self.guide_button, ("Open the User Guide"))
self.update_releases_button = ttk.Button(
self, text="Update Releases", command=self.METHOD_NAME
)
self.update_releases_button.grid(
row=3, column=0, pady=3, padx=10, sticky="nswe"
)
ToolTip(
self.update_releases_button,
(
"If you want to check for a new version of Playlunky\n"
"you can force an update with this button."
),
)
self.check_fyi_updates_button = ttk.Button(
self, text="Check for Mod Updates", command=self.check_fyi_updates
)
self.check_fyi_updates_button.grid(
row=4, column=0, pady=3, padx=10, sticky="nswe"
)
ToolTip(
self.check_fyi_updates_button,
("Check to see if any mods have updates available."),
)
self.clear_cache_button = ttk.Button(
self, text="Clear Cache", command=self.clear_cache
)
self.clear_cache_button.grid(row=5, column=0, pady=3, padx=10, sticky="nswe")
ToolTip(
self.clear_cache_button,
(
"Remove Playlunky cache. This could be helpful\n"
"if things aren't working as expected."
),
)
def on_load(self):
if self.modlunky_config.spelunky_fyi_api_token:
self.check_fyi_updates_button["state"] = tk.NORMAL
else:
self.check_fyi_updates_button["state"] = tk.DISABLED
def refresh_mods(self):
self.play_tab.on_load()
def open_packs(self):
if not self.modlunky_config.install_dir:
return
packs_dir = self.modlunky_config.install_dir / "Mods/Packs"
if not packs_dir.exists():
logger.info("Couldn't find Packs directory. Looked in %s", packs_dir)
return
open_directory(packs_dir)
def guide(self):
webbrowser.open_new_tab("https://github.com/spelunky-fyi/Playlunky/wiki")
def METHOD_NAME(self):
self.play_tab.version_frame.task_manager.call("play:cache_releases")
def check_fyi_updates(self):
self.play_tab.packs_frame.cache_fyi_pack_details()
def clear_cache(self):
if not self.modlunky_config.install_dir:
return
cache_dir = self.modlunky_config.install_dir / "Mods/Packs/.db"
if not cache_dir.exists():
logger.info("No cache directory found to remove. Looked in %s", cache_dir)
return
answer = tk.messagebox.askokcancel(
title="Confirmation",
message=(
"Are you sure you want to remove Playlunky cache?\n"
"\n"
f"This will remove {cache_dir} and all of its contents."
),
icon=tk.messagebox.WARNING,
)
if not answer:
return
shutil.rmtree(cache_dir) |
96 | test one typevar | from typing import *
from typing import Callable, ForwardRef
from pytest import skip
from python_ta.typecheck.base import (
TypeConstraints,
TypeFail,
TypeInfo,
TypeResult,
create_Callable,
)
skip_msg = "Skipped"
tc = TypeConstraints()
# Helper functions
def unify_helper(arg1: type, arg2: type, exp_result: Union[type, TypeFail]):
unify_result = TypeInfo(arg1) >> (lambda t1: TypeInfo(arg2) >> (lambda t2: tc.unify(t1, t2)))
if isinstance(exp_result, TypeFail):
assert isinstance(unify_result, TypeFail)
else:
assert tc.resolve(unify_result.getValue()).getValue() == tc.resolve(exp_result).getValue()
def setup_typevar(t: type):
tv = tc.fresh_tvar(None)
tc.unify(tv, t)
return tv
def resolve_helper(t: type, exp_type: type):
assert tc.resolve(t).getValue() == exp_type
# Unify primitives
def test_same_prim():
unify_helper(bool, bool, bool)
unify_helper(int, int, int)
unify_helper(str, str, str)
def test_diff_prim():
unify_helper(bool, str, TypeFail(f"Incompatible Types {bool} and {str}"))
unify_helper(int, str, TypeFail(f"Incompatible Types {int} and {str}"))
unify_helper(bool, int, TypeFail(f"Incompatible Types {bool} and {int}"))
unify_helper(float, int, TypeFail(f"Incompatible Types {float} and {int}"))
unify_helper(float, str, TypeFail(f"Incompatible Types {float} and {str}"))
# Unify TypeVars
def test_same_typevars():
tc.reset()
tv1 = setup_typevar(str)
tv2 = setup_typevar(str)
resolve_helper(tv1, str)
resolve_helper(tv2, str)
unify_helper(tv1, tv2, tv1)
def test_same_typevars_flipped():
tc.reset()
tv1 = setup_typevar(str)
tv2 = setup_typevar(str)
resolve_helper(tv1, str)
resolve_helper(tv2, str)
unify_helper(tv1, tv2, tv2)
def test_diff_typevars():
tc.reset()
tv_str = setup_typevar(str)
tv_int = setup_typevar(int)
resolve_helper(tv_str, str)
resolve_helper(tv_int, int)
unify_helper(tv_int, tv_str, TypeFail(f"Incompatible Types {str} and {int}"))
def METHOD_NAME():
tc.reset()
tv = setup_typevar(str)
resolve_helper(tv, str)
unify_helper(tv, str, str)
unify_helper(str, tv, str)
unify_helper(tv, int, TypeFail(f"Incompatible Types {str} and {int}"))
unify_helper(int, tv, TypeFail(f"Incompatible Types {int} and {str}"))
def test_two_typevar():
tc.reset()
tv1 = setup_typevar(bool)
tv2 = tc.fresh_tvar(None)
unify_helper(tv1, tv2, bool)
# Unify ForwardRefs
def test_same_forward_ref():
fr1 = ForwardRef("a")
fr2 = ForwardRef("a")
unify_helper(fr1, fr2, fr1)
unify_helper(fr1, fr2, fr2)
def test_diff_forward_ref():
skip("The existing error msg does not apply to this situation")
fr1 = ForwardRef("a")
fr2 = ForwardRef("b")
unify_helper(fr1, fr2, TypeFail("Attempted to unify forwardref with non-ref"))
def test_one_forward_ref():
fr = ForwardRef("a")
unify_helper(fr, str, TypeFail("Attempted to unify forwardref with non-ref"))
# Unify Tuples
def test_same_tuple():
unify_helper(Tuple[int, int], Tuple[int, int], Tuple[int, int])
unify_helper(Tuple[str, str], Tuple[str, str], Tuple[str, str])
def test_diff_tuple():
unify_helper(
Tuple[int, int],
Tuple[str, str],
TypeFail(f"Incompatible Types {Tuple[int, int]} and {Tuple[str, str]}"),
)
def test_nested_tuples():
unify_helper(
Tuple[str, Tuple[str, bool]], Tuple[str, Tuple[str, bool]], Tuple[str, Tuple[str, bool]]
)
def test_typevars_tuple():
tv1 = tc.fresh_tvar(None)
tv2 = tc.fresh_tvar(None)
unify_helper(Tuple[tv1, tv2], Tuple[str, bool], Tuple[str, bool])
resolve_helper(tv1, str)
resolve_helper(tv2, bool)
def test_typevars_nested_tuples():
tv1 = tc.fresh_tvar(None)
tv2 = Tuple[tv1, bool]
unify_helper(tv2, Tuple[Tuple[str, bool], bool], Tuple[Tuple[str, bool], bool])
resolve_helper(tv1, Tuple[str, bool])
resolve_helper(tv2, Tuple[Tuple[str, bool], bool])
def test_diff_nested_tuples():
unify_helper(
Tuple[str, Tuple[str, str]],
Tuple[str, Tuple[bool, str]],
TypeFail(
f"Incompatible Types {Tuple[str, Tuple[str, str]]} and {Tuple[str, Tuple[bool, str]]}"
),
)
# Unify list
def test_same_list():
unify_helper(List[str], List[str], List[str])
unify_helper(List[int], List[int], List[int])
def test_diff_list():
unify_helper(List[str], List[int], TypeFail(f"Incompatible Types {List[str]} and {List[int]}"))
# Unify callables
def test_same_callable():
tc.reset()
c1 = Callable[[bool], bool]
c2 = Callable[[bool], bool]
unify_helper(c1, c2, c1)
unify_helper(c1, c2, c2)
unify_helper(c2, c1, c1)
unify_helper(c2, c1, c2)
def test_diff_callable():
c1 = Callable[[bool], bool]
c2 = Callable[[str], str]
unify_helper(c1, c2, TypeFail(f"Incompatible Types {c1} and {c2}"))
# Polymorphic types
def test_simple_polymorphic_call():
tc.reset()
tv1 = tc.fresh_tvar()
tv2 = tc.fresh_tvar()
fn1 = create_Callable([tv1, tv2], bool, {tv1, tv2})
fn2 = create_Callable([int, int], bool)
unify_helper(fn1, fn2, Callable[[int, int], bool])
def test_higher_order_polymorphic_call():
tc.reset()
tv1 = tc.fresh_tvar()
tv2 = tc.fresh_tvar()
fn0 = create_Callable([tv1, int], int, {tv1})
fn1 = create_Callable([int, int], int)
fn2 = create_Callable([fn0, int], bool)
fn3 = create_Callable([fn1, int], bool)
fn4 = create_Callable([tv2, int], bool, {tv2})
unify_helper(fn2, fn3, Callable[[Callable[[int, int], int], int], bool])
unify_helper(fn2, fn4, Callable[[Callable[[int, int], int], int], bool])
resolve_helper(tv1, int)
resolve_helper(tv2, Callable[[int, int], int])
# Union types
def test_simple_union():
tc.reset()
unify_helper(int, Union[int, str], int)
def test_tvar_union():
tc.reset()
unify_helper(tc.fresh_tvar(), Union[int, str], Union[int, str])
unify_helper(Union[int, str], tc.fresh_tvar(), Union[int, str])
def test_two_unions():
tc.reset()
unify_helper(Union[int, str, None], Union[bool, int], int)
def test_optional():
tc.reset()
tv1 = tc.fresh_tvar()
unify_helper(int, Optional[int], int)
unify_helper(type(None), Optional[int], type(None))
unify_helper(tv1, Optional[int], Optional[int]) |
97 | require 8021q | import errno
import os
import platform
import pwd
import re
import stat
import subprocess
import sys
import uuid
from socket import AF_INET, AF_INET6
import netaddr
import pytest
from pyroute2 import config
from pyroute2.iproute.linux import IPRoute
try:
import httplib
except ImportError:
import http.client as httplib
dtcd_uuid = str(uuid.uuid4())
# check the dtcd
try:
cx = httplib.HTTPConnection('localhost:7623')
cx.request('GET', '/v1/network/')
cx.getresponse()
has_dtcd = True
except:
has_dtcd = False
supernet = {
AF_INET: netaddr.IPNetwork('172.16.0.0/12'),
AF_INET6: netaddr.IPNetwork('fdb3:84e5:4ff4::/48'),
}
network_pool = {
AF_INET: list(supernet[AF_INET].subnet(24)),
AF_INET6: list(supernet[AF_INET6].subnet(64)),
}
allocations = {}
family_url = {AF_INET: 'ipv4', AF_INET6: 'ipv6'}
def allocate_network(family=AF_INET):
global dtcd_uuid
global network_pool
global allocations
network = None
try:
cx = httplib.HTTPConnection('localhost:7623')
cx.request(
'POST', '/v1/network/%s/' % family_url[family], body=dtcd_uuid
)
resp = cx.getresponse()
if resp.status == 200:
network = netaddr.IPNetwork(resp.read().decode('utf-8'))
cx.close()
except Exception:
pass
if network is None:
network = network_pool[family].pop()
allocations[network] = True
return network
def free_network(network, family=AF_INET):
global network_pool
global allocations
if network in allocations:
allocations.pop(network)
network_pool[family].append(network)
else:
cx = httplib.HTTPConnection('localhost:7623')
cx.request(
'DELETE', '/v1/network/%s/' % family_url[family], body=str(network)
)
cx.getresponse()
cx.close()
def conflict_arch(arch):
if platform.machine().find(arch) >= 0:
pytest.skip('conflict with architecture %s' % (arch))
def kernel_version_ge(major, minor):
# True if running kernel is >= X.Y
if config.kernel[0] > major:
return True
if config.kernel[0] < major:
return False
if minor and config.kernel[1] < minor:
return False
return True
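# For example, if the running kernel is reported as 5.4, kernel_version_ge(4, 19)
# is True and kernel_version_ge(5, 10) is False.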
def require_kernel(major, minor=None):
if not kernel_version_ge(major, minor):
pytest.skip('incompatible kernel version')
def require_python(target):
if sys.version_info[0] != target:
pytest.skip('test requires Python %i' % target)
def METHOD_NAME():
try:
os.stat('/proc/net/vlan/config')
except OSError as e:
# errno 2 'No such file or directory'
if e.errno == 2:
pytest.skip('missing 8021q support, or module is not loaded')
raise
def require_bridge():
with IPRoute() as ip:
try:
ip.link('add', ifname='test_req', kind='bridge')
except Exception:
pytest.skip('can not create <bridge>')
idx = ip.link_lookup(ifname='test_req')
if not idx:
pytest.skip('can not create <bridge>')
ip.link('del', index=idx)
def require_bond():
with IPRoute() as ip:
try:
ip.link('add', ifname='test_req', kind='bond')
except Exception:
pytest.skip('can not create <bond>')
idx = ip.link_lookup(ifname='test_req')
if not idx:
pytest.skip('can not create <bond>')
ip.link('del', index=idx)
def require_user(user):
if bool(os.environ.get('PYROUTE2_TESTS_RO', False)):
pytest.skip('read-only tests requested')
if pwd.getpwuid(os.getuid()).pw_name != user:
pytest.skip('required user %s' % (user))
def require_executable(name):
try:
with open(os.devnull, 'w') as fnull:
subprocess.check_call(['which', name], stdout=fnull, stderr=fnull)
except Exception:
pytest.skip('required %s not found' % (name))
def remove_link(name):
if os.getuid() != 0:
return
with open(os.devnull, 'w') as fnull:
subprocess.call(
['ip', 'link', 'del', 'dev', name], stdout=fnull, stderr=fnull
)
while True:
links = get_ip_link()
if name not in links:
break
def create_link(name, kind):
if os.getuid() != 0:
return
subprocess.call(['ip', 'link', 'add', 'dev', name, 'type', kind])
for i in range(20):
links = get_ip_link()
if name in links:
return
raise Exception("interface not created")
def _check_output(*argv):
# we can not use check_output, as it does not exist in 2.6
process = subprocess.Popen(argv, stdout=subprocess.PIPE)
ret = process.communicate()
return ret[0].decode('utf-8').split('\n')
def grep(command, pattern=None):
out = _check_output(*command.split())
ret = []
reg = re.compile(pattern)
for string in out:
if reg.search(string):
ret.append(string)
return ret
def get_ip_addr(interface=None):
argv = ['ip', '-o', 'ad']
if interface:
argv.extend(['li', 'dev', interface])
out = _check_output(*argv)
ret = []
for string in out:
fields = string.split()
if len(fields) >= 5 and fields[2][:4] == 'inet':
ret.append(fields[3])
return ret
def get_ip_brd(interface=None):
argv = ['ip', '-o', 'ad']
if interface:
argv.extend(['li', 'dev', interface])
out = _check_output(*argv)
ret = []
for string in out:
fields = string.split()
if len(fields) >= 5 and fields[4] == 'brd':
ret.append(fields[5])
return ret
def get_ip_link():
ret = []
out = _check_output('ip', '-o', 'li')
for string in out:
fields = string.split()
if len(fields) >= 2:
ret.append(fields[1][:-1].split('@')[0])
return ret
def get_ip_default_routes():
ret = []
out = _check_output('ip', '-4', 'ro')
for string in out:
if 'default' in string:
ret.append(string)
return ret
def get_ip_rules(proto='-4'):
ret = []
out = _check_output('ip', proto, 'rule', 'show')
for string in out:
if len(string):
ret.append(string)
return ret
def count_socket_fds():
pid_fd = '/proc/%s/fd' % os.getpid()
sockets = 0
for fd in os.listdir(pid_fd):
try:
if stat.S_ISSOCK(os.stat(os.path.join(pid_fd, fd)).st_mode):
sockets += 1
except OSError as e:
if e.errno != errno.ENOENT:
raise
return sockets |
98 | loc directive regex | from numba import cuda, float32, int32
from numba.core.errors import NumbaInvalidConfigWarning
from numba.cuda.testing import CUDATestCase, skip_on_cudasim
from numba.tests.support import ignore_internal_warnings
import re
import unittest
import warnings
@skip_on_cudasim('Simulator does not produce lineinfo')
class TestCudaLineInfo(CUDATestCase):
def METHOD_NAME(self):
# This is used in several tests
pat = (
r'\.loc' # .loc directive beginning
r'\s+[0-9]+' # whitespace then file index
r'\s+[0-9]+' # whitespace then line number
r'\s+[0-9]+' # whitespace then column position
)
return re.compile(pat)
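    # The pattern above is intended to match PTX lines such as
    # ".loc 1 42 17" (file index, line number and column separated by
    # whitespace).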
def _check(self, fn, sig, expect):
fn.compile(sig)
llvm = fn.inspect_llvm(sig)
ptx = fn.inspect_asm(sig)
assertfn = self.assertIsNotNone if expect else self.assertIsNone
# DICompileUnit debug info metadata should all be of the
# DebugDirectivesOnly kind, and not the FullDebug kind
pat = (
r'!DICompileUnit\(.*' # Opening of DICompileUnit metadata. Since
# the order of attributes is not
# guaranteed, we need to match arbitrarily
# afterwards.
r'emissionKind:\s+' # The emissionKind attribute followed by
# whitespace.
r'DebugDirectivesOnly' # The correct emissionKind.
)
match = re.compile(pat).search(llvm)
assertfn(match, msg=ptx)
pat = (
r'!DICompileUnit\(.*' # Same as the pattern above, but for the
r'emissionKind:\s+' # incorrect FullDebug emissionKind.
r'FullDebug' #
)
match = re.compile(pat).search(llvm)
self.assertIsNone(match, msg=ptx)
# The name of this file should be present in the line mapping
# if lineinfo was propagated through correctly.
pat = (
r'\.file' # .file directive beginning
r'\s+[0-9]+\s+' # file number surrounded by whitespace
r'".*test_lineinfo.py"' # filename in quotes, ignoring full path
)
match = re.compile(pat).search(ptx)
assertfn(match, msg=ptx)
# .loc directives should be present in the ptx
        match = self.METHOD_NAME().search(ptx)
assertfn(match, msg=ptx)
# Debug info sections should not be present when only lineinfo is
# generated
pat = (
r'\.section\s+' # .section directive beginning
r'\.debug_info' # Section named ".debug_info"
)
match = re.compile(pat).search(ptx)
self.assertIsNone(match, msg=ptx)
def test_no_lineinfo_in_asm(self):
@cuda.jit(lineinfo=False)
def foo(x):
x[0] = 1
self._check(foo, sig=(int32[:],), expect=False)
def test_lineinfo_in_asm(self):
@cuda.jit(lineinfo=True)
def foo(x):
x[0] = 1
self._check(foo, sig=(int32[:],), expect=True)
def test_lineinfo_maintains_error_model(self):
sig = (float32[::1], float32[::1])
@cuda.jit(sig, lineinfo=True)
def divide_kernel(x, y):
x[0] /= y[0]
llvm = divide_kernel.inspect_llvm(sig)
# When the error model is Python, the device function returns 1 to
# signal an exception (e.g. divide by zero) has occurred. When the
# error model is the default NumPy one (as it should be when only
# lineinfo is enabled) the device function always returns 0.
self.assertNotIn('ret i32 1', llvm)
def test_no_lineinfo_in_device_function(self):
# Ensure that no lineinfo is generated in device functions by default.
@cuda.jit
def callee(x):
x[0] += 1
@cuda.jit
def caller(x):
x[0] = 1
callee(x)
sig = (int32[:],)
self._check(caller, sig=sig, expect=False)
def test_lineinfo_in_device_function(self):
# First we define a device function / kernel pair and run the usual
# checks on the generated LLVM and PTX.
@cuda.jit(lineinfo=True)
def callee(x):
x[0] += 1
@cuda.jit(lineinfo=True)
def caller(x):
x[0] = 1
callee(x)
sig = (int32[:],)
self._check(caller, sig=sig, expect=True)
# Now we can check the PTX of the device function specifically.
ptx = caller.inspect_asm(sig)
ptxlines = ptx.splitlines()
# Check that there is no device function in the PTX
        # A line beginning with ".weak .func" identifies a device function
devfn_start = re.compile(r'^\.weak\s+\.func')
for line in ptxlines:
if devfn_start.match(line) is not None:
self.fail(f"Found device function in PTX:\n\n{ptx}")
# Scan for .loc directives that refer to an inlined device function
loc_directive = self.METHOD_NAME()
found = False
for line in ptxlines:
if loc_directive.search(line) is not None:
if 'inlined_at' in line:
found = True
break
if not found:
            self.fail(f'No .loc directive with inlined_at info found '
f'in:\n\n{ptx}')
# We also inspect the LLVM to ensure that there's debug info for each
# subprogram (function). A lightweight way to check this is to ensure
# that we have as many DISubprograms as we expect.
llvm = caller.inspect_llvm(sig)
subprograms = 0
for line in llvm.splitlines():
if 'distinct !DISubprogram' in line:
subprograms += 1
# One DISubprogram for each of:
# - The kernel wrapper
# - The caller
# - The callee
expected_subprograms = 3
self.assertEqual(subprograms, expected_subprograms,
                         f'Expected {expected_subprograms} DISubprograms; '
f'got {subprograms}')
def test_debug_and_lineinfo_warning(self):
with warnings.catch_warnings(record=True) as w:
ignore_internal_warnings()
# We pass opt=False to prevent the warning about opt and debug
# occurring as well
@cuda.jit(debug=True, lineinfo=True, opt=False)
def f():
pass
self.assertEqual(len(w), 1)
self.assertEqual(w[0].category, NumbaInvalidConfigWarning)
self.assertIn('debug and lineinfo are mutually exclusive',
str(w[0].message))
if __name__ == '__main__':
unittest.main() |
99 | on click | # -*- coding: utf-8 -*-
# vStream https://github.com/Kodi-vStream/venom-xbmc-addons
# Pour l'utiliser
# from resources.lib.captcha import Captcha_Get_Reponse
try:
import urllib2
except ImportError:
import urllib.request as urllib2
import xbmcvfs
import xbmc
import xbmcgui
from resources.lib.comaddon import dialog
newMethod = True
dialogs = dialog()
def Captcha_Get_Reponse(img, cookie):
    # download the image
# PathCache = xbmc.translatePath(xbmcaddon.Addon("plugin.video.vstream").getAddonInfo("profile"))
# filename = os.path.join(PathCache, "Captcha.raw").decode("utf-8")
filename = "special://home/userdata/addon_data/plugin.video.vstream/Captcha.raw"
headers2 = {
"User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:37.0) Gecko/20100101 Firefox/37.0",
# "Referer": url,
"Host": "protect.ddl-island.su",
"Accept": "image/png,image/*;q=0.8,*/*;q=0.5",
"Accept-Language": "fr-FR,fr;q=0.8,en-US;q=0.6,en;q=0.4",
"Accept-Encoding": "gzip, deflate",
# "Content-Type": "application/x-www-form-urlencoded",
}
if cookie:
headers2["Cookie"] = cookie
try:
req = urllib2.Request(img, None, headers2)
image_on_web = urllib2.urlopen(req)
if image_on_web.headers.maintype == "image":
buf = image_on_web.read()
downloaded_image = xbmcvfs.File(filename, "wb")
downloaded_image.write(buf)
downloaded_image.close()
image_on_web.close()
else:
return ""
except:
return ""
    # display the dialog
solution = ""
if newMethod:
        # new captcha
try:
            # show the custom dialog
class XMLDialog(xbmcgui.WindowXMLDialog):
# """
# Dialog class for captcha
# """
def __init__(self, *args, **kwargs):
xbmcgui.WindowXMLDialog.__init__(self)
pass
def onInit(self):
                    # captcha background image
self.getControl(1).setImage(filename.encode("utf-8"), False)
                    # small captcha image (memory fail)
self.getControl(2).setImage(filename.encode("utf-8"), False)
self.getControl(2).setVisible(False)
                    # keyboard focus
self.setFocus(self.getControl(21))
def METHOD_NAME(self, controlId):
if controlId == 20:
                        # Validate button
solution = self.getControl(5000).getLabel()
xbmcgui.Window(10101).setProperty("captcha", str(solution))
self.close()
return
elif controlId == 30:
                        # Close button
self.close()
return
elif controlId == 21:
                        # Keyboard button
self.getControl(2).setVisible(True)
kb = xbmc.Keyboard(self.getControl(5000).getLabel(), "", False)
kb.doModal()
if kb.isConfirmed():
self.getControl(5000).setLabel(kb.getText())
self.getControl(2).setVisible(False)
else:
self.getControl(2).setVisible(False)
def onFocus(self, controlId):
self.controlId = controlId
def _close_dialog(self):
self.close()
def onAction(self, action):
                    # return key 61448
if action.getId() in (9, 10, 11, 30, 92, 216, 247, 257, 275, 61467, 61448):
self.close()
path = "special://home/addons/plugin.video.vstream"
wd = XMLDialog("DialogCaptcha.xml", path, "default", "720p")
wd.doModal()
del wd
finally:
solution = xbmcgui.Window(10101).getProperty("captcha")
if solution == "":
dialogs.VSinfo("Vous devez taper le captcha")
else:
        # old captcha
try:
img = xbmcgui.ControlImage(450, 0, 400, 130, filename.encode("utf-8"))
wdlg = xbmcgui.WindowDialog()
wdlg.addControl(img)
wdlg.show()
# xbmc.sleep(3000)
kb = xbmc.Keyboard("", "Tapez les Lettres/chiffres de l'image", False)
kb.doModal()
if kb.isConfirmed():
solution = kb.getText()
if solution == "":
dialogs.VSinfo("Vous devez taper le captcha")
else:
dialogs.VSinfo("Vous devez taper le captcha")
finally:
wdlg.removeControl(img)
wdlg.close()
return solution |