content stringlengths 7-928k | avg_line_length float64 3.5-33.8k | max_line_length int64 6-139k | alphanum_fraction float64 0.08-0.96 | licenses sequence | repository_name stringlengths 7-104 | path stringlengths 4-230 | size int64 7-928k | lang stringclasses 1 value |
---|---|---|---|---|---|---|---|---|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A standalone utility for computing the log moments.
The utility for computing the log moments. It consists of two methods.
compute_log_moment(q, sigma, T, lmbd) computes the log moment with sampling
probability q, noise sigma, order lmbd, and T steps. get_privacy_spent computes
delta (or eps) given log moments and eps (or delta).
Example use:
Suppose that we have run an algorithm with parameters, an array of
(q1, sigma1, T1) ... (qk, sigmak, Tk), and we wish to compute eps for a given
delta. The example code would be:
max_lmbd = 32
  lmbds = range(1, max_lmbd + 1)
log_moments = []
for lmbd in lmbds:
log_moment = 0
for q, sigma, T in parameters:
log_moment += compute_log_moment(q, sigma, T, lmbd)
log_moments.append((lmbd, log_moment))
eps, delta = get_privacy_spent(log_moments, target_delta=delta)
To verify that the I1 >= I2 (see comments in GaussianMomentsAccountant in
accountant.py for the context), run the same loop above with verify=True
passed to compute_log_moment.
"""
import math
import sys
import numpy as np
import scipy.integrate as integrate
import scipy.special  # needed for scipy.special.binom in compute_a
import scipy.stats
# from sympy.mpmath import mp
import mpmath as mp
def _to_np_float64(v):
if math.isnan(v) or math.isinf(v):
return np.inf
return np.float64(v)
######################
# FLOAT64 ARITHMETIC #
######################
def pdf_gauss(x, sigma, mean=0):
return scipy.stats.norm.pdf(x, loc=mean, scale=sigma)
def cropped_ratio(a, b):
if a < 1E-50 and b < 1E-50:
return 1.
else:
return a / b
def integral_inf(fn):
integral, _ = integrate.quad(fn, -np.inf, np.inf)
return integral
def integral_bounded(fn, lb, ub):
integral, _ = integrate.quad(fn, lb, ub)
return integral
def distributions(sigma, q):
mu0 = lambda y: pdf_gauss(y, sigma=sigma, mean=0.0)
mu1 = lambda y: pdf_gauss(y, sigma=sigma, mean=1.0)
mu = lambda y: (1 - q) * mu0(y) + q * mu1(y)
return mu0, mu1, mu
def compute_a(sigma, q, lmbd, verbose=False):
lmbd_int = int(math.ceil(lmbd))
if lmbd_int == 0:
return 1.0
a_lambda_first_term_exact = 0
a_lambda_second_term_exact = 0
for i in range(lmbd_int + 1):
coef_i = scipy.special.binom(lmbd_int, i) * (q ** i)
s1, s2 = 0, 0
for j in range(i + 1):
coef_j = scipy.special.binom(i, j) * (-1) ** (i - j)
s1 += coef_j * np.exp((j * j - j) / (2.0 * (sigma ** 2)))
s2 += coef_j * np.exp((j * j + j) / (2.0 * (sigma ** 2)))
a_lambda_first_term_exact += coef_i * s1
a_lambda_second_term_exact += coef_i * s2
a_lambda_exact = ((1.0 - q) * a_lambda_first_term_exact +
q * a_lambda_second_term_exact)
if verbose:
print("A: by binomial expansion {} = {} + {}".format(
a_lambda_exact,
(1.0 - q) * a_lambda_first_term_exact,
q * a_lambda_second_term_exact))
return _to_np_float64(a_lambda_exact)
def compute_b(sigma, q, lmbd, verbose=False):
mu0, _, mu = distributions(sigma, q)
b_lambda_fn = lambda z: mu0(z) * np.power(cropped_ratio(mu0(z), mu(z)), lmbd)
b_lambda = integral_inf(b_lambda_fn)
m = sigma ** 2 * (np.log((2. - q) / (1. - q)) + 1. / (2 * sigma ** 2))
b_fn = lambda z: (np.power(mu0(z) / mu(z), lmbd) -
np.power(mu(-z) / mu0(z), lmbd))
if verbose:
print("M =", m)
print("f(-M) = {} f(M) = {}".format(b_fn(-m), b_fn(m)))
assert b_fn(-m) < 0 and b_fn(m) < 0
b_lambda_int1_fn = lambda z: (mu0(z) *
np.power(cropped_ratio(mu0(z), mu(z)), lmbd))
b_lambda_int2_fn = lambda z: (mu0(z) *
np.power(cropped_ratio(mu(z), mu0(z)), lmbd))
b_int1 = integral_bounded(b_lambda_int1_fn, -m, m)
b_int2 = integral_bounded(b_lambda_int2_fn, -m, m)
a_lambda_m1 = compute_a(sigma, q, lmbd - 1)
b_bound = a_lambda_m1 + b_int1 - b_int2
if verbose:
print("B: by numerical integration", b_lambda)
print("B must be no more than ", b_bound)
print(b_lambda, b_bound)
return _to_np_float64(b_lambda)
###########################
# MULTIPRECISION ROUTINES #
###########################
def pdf_gauss_mp(x, sigma, mean):
return mp.mpf(1.) / mp.sqrt(mp.mpf("2.") * sigma ** 2 * mp.pi) * mp.exp(
- (x - mean) ** 2 / (mp.mpf("2.") * sigma ** 2))
def integral_inf_mp(fn):
integral, _ = mp.quad(fn, [-mp.inf, mp.inf], error=True)
return integral
def integral_bounded_mp(fn, lb, ub):
integral, _ = mp.quad(fn, [lb, ub], error=True)
return integral
def distributions_mp(sigma, q):
mu0 = lambda y: pdf_gauss_mp(y, sigma=sigma, mean=mp.mpf(0))
mu1 = lambda y: pdf_gauss_mp(y, sigma=sigma, mean=mp.mpf(1))
mu = lambda y: (1 - q) * mu0(y) + q * mu1(y)
return mu0, mu1, mu
def compute_a_mp(sigma, q, lmbd, verbose=False):
lmbd_int = int(math.ceil(lmbd))
if lmbd_int == 0:
return 1.0
mu0, mu1, mu = distributions_mp(sigma, q)
a_lambda_fn = lambda z: mu(z) * (mu(z) / mu0(z)) ** lmbd_int
a_lambda_first_term_fn = lambda z: mu0(z) * (mu(z) / mu0(z)) ** lmbd_int
a_lambda_second_term_fn = lambda z: mu1(z) * (mu(z) / mu0(z)) ** lmbd_int
a_lambda = integral_inf_mp(a_lambda_fn)
a_lambda_first_term = integral_inf_mp(a_lambda_first_term_fn)
a_lambda_second_term = integral_inf_mp(a_lambda_second_term_fn)
if verbose:
print("A: by numerical integration {} = {} + {}".format(
a_lambda,
(1 - q) * a_lambda_first_term,
q * a_lambda_second_term))
return _to_np_float64(a_lambda)
def compute_b_mp(sigma, q, lmbd, verbose=False):
lmbd_int = int(math.ceil(lmbd))
if lmbd_int == 0:
return 1.0
mu0, _, mu = distributions_mp(sigma, q)
b_lambda_fn = lambda z: mu0(z) * (mu0(z) / mu(z)) ** lmbd_int
b_lambda = integral_inf_mp(b_lambda_fn)
m = sigma ** 2 * (mp.log((2 - q) / (1 - q)) + 1 / (2 * (sigma ** 2)))
b_fn = lambda z: ((mu0(z) / mu(z)) ** lmbd_int -
(mu(-z) / mu0(z)) ** lmbd_int)
if verbose:
print("M =", m)
print("f(-M) = {} f(M) = {}".format(b_fn(-m), b_fn(m)))
assert b_fn(-m) < 0 and b_fn(m) < 0
b_lambda_int1_fn = lambda z: mu0(z) * (mu0(z) / mu(z)) ** lmbd_int
b_lambda_int2_fn = lambda z: mu0(z) * (mu(z) / mu0(z)) ** lmbd_int
b_int1 = integral_bounded_mp(b_lambda_int1_fn, -m, m)
b_int2 = integral_bounded_mp(b_lambda_int2_fn, -m, m)
a_lambda_m1 = compute_a_mp(sigma, q, lmbd - 1)
b_bound = a_lambda_m1 + b_int1 - b_int2
if verbose:
print("B by numerical integration", b_lambda)
print("B must be no more than ", b_bound)
assert b_lambda < b_bound + 1e-5
return _to_np_float64(b_lambda)
def _compute_delta(log_moments, eps):
"""Compute delta for given log_moments and eps.
Args:
log_moments: the log moments of privacy loss, in the form of pairs
of (moment_order, log_moment)
eps: the target epsilon.
Returns:
delta
"""
min_delta = 1.0
for moment_order, log_moment in log_moments:
if moment_order == 0:
continue
if math.isinf(log_moment) or math.isnan(log_moment):
sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
continue
if log_moment < moment_order * eps:
min_delta = min(min_delta,
math.exp(log_moment - moment_order * eps))
return min_delta
def _compute_eps(log_moments, delta):
"""Compute epsilon for given log_moments and delta.
Args:
log_moments: the log moments of privacy loss, in the form of pairs
of (moment_order, log_moment)
delta: the target delta.
Returns:
epsilon
"""
min_eps = float("inf")
for moment_order, log_moment in log_moments:
if moment_order == 0:
continue
if math.isinf(log_moment) or math.isnan(log_moment):
sys.stderr.write("The %d-th order is inf or Nan\n" % moment_order)
continue
min_eps = min(min_eps, (log_moment - math.log(delta)) / moment_order)
return min_eps
def compute_log_moment(q, sigma, steps, lmbd, verify=False, verbose=False):
"""Compute the log moment of Gaussian mechanism for given parameters.
Args:
q: the sampling ratio.
sigma: the noise sigma.
steps: the number of steps.
lmbd: the moment order.
verify: if False, only compute the symbolic version. If True, computes
both symbolic and numerical solutions and verifies the results match.
verbose: if True, print out debug information.
Returns:
the log moment with type np.float64, could be np.inf.
"""
moment = compute_a(sigma, q, lmbd, verbose=verbose)
if verify:
mp.dps = 50
moment_a_mp = compute_a_mp(sigma, q, lmbd, verbose=verbose)
moment_b_mp = compute_b_mp(sigma, q, lmbd, verbose=verbose)
np.testing.assert_allclose(moment, moment_a_mp, rtol=1e-10)
if not np.isinf(moment_a_mp):
# The following test fails for (1, np.inf)!
np.testing.assert_array_less(moment_b_mp, moment_a_mp)
if np.isinf(moment):
return np.inf
else:
return np.log(moment) * steps
def get_privacy_spent(log_moments, target_eps=None, target_delta=None):
"""Compute delta (or eps) for given eps (or delta) from log moments.
Args:
log_moments: array of (moment_order, log_moment) pairs.
target_eps: if not None, the epsilon for which we would like to compute
corresponding delta value.
target_delta: if not None, the delta for which we would like to compute
corresponding epsilon value. Exactly one of target_eps and target_delta
is None.
Returns:
eps, delta pair
"""
assert (target_eps is None) ^ (target_delta is None)
assert not ((target_eps is None) and (target_delta is None))
if target_eps is not None:
return (target_eps, _compute_delta(log_moments, target_eps))
else:
return (_compute_eps(log_moments, target_delta), target_delta)
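# The block below is a minimal usage sketch mirroring the module docstring; it
# is not part of the original utility, and the parameter values (q, sigma, T,
# target_delta) are illustrative assumptions only, not recommended settings.
if __name__ == "__main__":
  parameters = [(0.01, 4.0, 10000)]  # hypothetical (q, sigma, T) triples
  max_lmbd = 32
  log_moments = []
  for lmbd in range(1, max_lmbd + 1):
    log_moment = 0
    for q, sigma, T in parameters:
      log_moment += compute_log_moment(q, sigma, T, lmbd)
    log_moments.append((lmbd, log_moment))
  eps, delta = get_privacy_spent(log_moments, target_delta=1e-5)
  print("eps = {}, delta = {}".format(eps, delta))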
| 32.08642 | 80 | 0.653713 | ["MIT"] | DPBayes/ADADP | CIFAR_tests/gaussian_moments.py | 10,396 | Python |
from django.contrib.auth import get_user_model, authenticate
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
"""Serializer for the users object"""
class Meta:
model = get_user_model()
fields = ('email', 'password', 'name')
extra_kwargs = {'password': {'write_only': True, 'min_length': 5}}
def create(self, validated_data):
"""Create a new user with encrypted password and return it"""
return get_user_model().objects.create_user(**validated_data)
def update(self, instance, validated_data):
"""Update a user, setting the password correctly and return it"""
password = validated_data.pop('password', None)
user = super().update(instance, validated_data)
if password:
user.set_password(password)
user.save()
return user
class AuthTokenSerializer(serializers.Serializer):
"""Serializer for the user authentication object"""
email = serializers.CharField()
password = serializers.CharField(
style={'input_type': 'password'},
trim_whitespace=False
)
def validate(self, attrs):
"""Validate and authenticate the user"""
email = attrs.get('email')
password = attrs.get('password')
user = authenticate(
request=self.context.get('request'),
username=email,
password=password
)
if not user:
msg = _("Unable to authenticate with provided credentials")
raise serializers.ValidationError(msg, code='authentication')
attrs['user'] = user
return attrs
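# Usage sketch (illustrative only, not part of the app): assumes a configured
# Django project with this app installed, e.g. inside `manage.py shell` or a
# test case; the email/password values are made-up examples.
#
#     serializer = UserSerializer(data={
#         'email': 'user@example.com',
#         'password': 'supersecret',
#         'name': 'Example User',
#     })
#     serializer.is_valid(raise_exception=True)
#     user = serializer.save()   # dispatches to create() above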
| 30.875 | 74 | 0.646038 | ["MIT"] | siddharthisaiah/recipe-app-api | app/user/serializers.py | 1,729 | Python |
# coding: utf-8
"""
Memsource REST API
Welcome to Memsource's API documentation. To view our legacy APIs please [visit our documentation](https://wiki.memsource.com/wiki/Memsource_API) and for more information about our new APIs, [visit our blog](https://www.memsource.com/blog/2017/10/24/introducing-rest-apis-qa-with-the-memsource-api-team/). If you have any questions, please contact [Memsource Support](<mailto:support@memsource.com>). # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import memsource_cli
from memsource_cli.models.project_workflow_step_dto_v2 import ProjectWorkflowStepDtoV2 # noqa: E501
from memsource_cli.rest import ApiException
class TestProjectWorkflowStepDtoV2(unittest.TestCase):
"""ProjectWorkflowStepDtoV2 unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testProjectWorkflowStepDtoV2(self):
"""Test ProjectWorkflowStepDtoV2"""
# FIXME: construct object with mandatory attributes with example values
# model = memsource_cli.models.project_workflow_step_dto_v2.ProjectWorkflowStepDtoV2() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 32.317073 | 421 | 0.744151 | ["Apache-2.0"] | unofficial-memsource/memsource-cli | test/test_project_workflow_step_dto_v2.py | 1,325 | Python |
#!/usr/bin/env python3
# ----------------------------------------------------------------------------
#
# Copyright 2018 EMVA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ----------------------------------------------------------------------------
# Standard library imports
# Related third party imports
from PyQt5.QtWidgets import QComboBox
from genicam2.gentl import NotImplementedException, NotAvailableException, \
InvalidParameterException
# Local application/library specific imports
from harvesters._private.core.observer import Observer
from harvesters_gui._private.frontend.pyqt5.helper import get_system_font
class ComboBoxDeviceList(QComboBox, Observer):
def __init__(self, parent=None):
super().__init__(parent)
self.setFont(get_system_font())
def update(self):
if self.parent().parent().harvester_core.has_revised_device_info_list:
self.clear()
separator = '::'
for d in self.parent().parent().harvester_core.device_info_list:
name = d.vendor
name += separator
name += d.model
try:
_ = d.serial_number
except: # We know it's too broad:
pass
else:
if d.serial_number != '':
name += separator
name += d.serial_number
try:
_ = d.user_defined_name
except: # We know it's too broad:
pass
else:
if d.user_defined_name != '':
name += separator
name += d.user_defined_name
self.addItem(name)
#
self.parent().parent().harvester_core.has_revised_device_info_list = False
#
enable = False
if self.parent().parent().cti_files:
if self.parent().parent().ia is None:
enable = True
self.setEnabled(enable)
| 33.96 | 82 | 0.563408 | ["Apache-2.0"] | willlewin/harvesters_gui | src/harvesters_gui/_private/frontend/pyqt5/device_list.py | 2,547 | Python |
"""Commonly used tensor functions."""
import math
from typing import Union, Optional
import tensorflow as tf
import numpy as np
from .factory import AbstractTensor
def binarize(tensor: tf.Tensor,
bitsize: Optional[int] = None) -> tf.Tensor:
"""Extract bits of values in `tensor`, returning a `tf.Tensor` with same
dtype."""
with tf.name_scope('binarize'):
bitsize = bitsize or (tensor.dtype.size * 8)
bit_indices_shape = [1] * len(tensor.shape) + [bitsize]
bit_indices = tf.range(bitsize, dtype=tensor.dtype)
bit_indices = tf.reshape(bit_indices, bit_indices_shape)
val = tf.expand_dims(tensor, -1)
val = tf.bitwise.bitwise_and(tf.bitwise.right_shift(val, bit_indices), 1)
assert val.dtype == tensor.dtype
return val
def bits(tensor: tf.Tensor, bitsize: Optional[int] = None) -> list:
"""Extract bits of values in `tensor`, returning a list of tensors."""
with tf.name_scope('bits'):
bitsize = bitsize or (tensor.dtype.size * 8)
the_bits = [
tf.bitwise.bitwise_and(tf.bitwise.right_shift(tensor, i), 1)
for i in range(bitsize)
]
return the_bits
# return tf.stack(bits, axis=-1)
def im2col(x: Union[tf.Tensor, np.ndarray],
h_filter: int,
w_filter: int,
padding: str,
stride: int) -> tf.Tensor:
"""Generic implementation of im2col on tf.Tensors."""
with tf.name_scope('im2col'):
# we need NHWC because tf.extract_image_patches expects this
nhwc_tensor = tf.transpose(x, [0, 2, 3, 1])
channels = int(nhwc_tensor.shape[3])
# extract patches
patch_tensor = tf.extract_image_patches(
nhwc_tensor,
ksizes=[1, h_filter, w_filter, 1],
strides=[1, stride, stride, 1],
rates=[1, 1, 1, 1],
padding=padding
)
# change back to NCHW
patch_tensor_nchw = tf.reshape(tf.transpose(patch_tensor, [3, 1, 2, 0]),
(h_filter, w_filter, channels, -1))
# reshape to x_col
x_col_tensor = tf.reshape(tf.transpose(patch_tensor_nchw, [2, 0, 1, 3]),
(channels * h_filter * w_filter, -1))
return x_col_tensor
def conv2d(x: AbstractTensor,
y: AbstractTensor,
stride,
padding) -> AbstractTensor:
"""Generic convolution implementation with im2col over AbstractTensors."""
with tf.name_scope('conv2d'):
h_filter, w_filter, in_filters, out_filters = map(int, y.shape)
n_x, c_x, h_x, w_x = map(int, x.shape)
if c_x != in_filters:
# in depthwise conv the filter's in and out dimensions are reversed
out_filters = in_filters
if padding == 'SAME':
h_out = int(math.ceil(float(h_x) / float(stride)))
w_out = int(math.ceil(float(w_x) / float(stride)))
elif padding == 'VALID':
h_out = int(math.ceil(float(h_x - h_filter + 1) / float(stride)))
w_out = int(math.ceil(float(w_x - w_filter + 1) / float(stride)))
else:
raise ValueError("Don't know padding method '{}'".format(padding))
x_col = x.im2col(h_filter, w_filter, padding, stride)
w_col = y.transpose([3, 2, 0, 1]).reshape([int(out_filters), -1])
out = w_col.matmul(x_col)
out = out.reshape([out_filters, h_out, w_out, n_x])
out = out.transpose([3, 0, 1, 2])
return out
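# Worked shape example (illustrative only) for the im2col/conv2d pair above,
# assuming a 1x1x5x5 NCHW input, a 3x3x1x1 filter, stride 1, 'VALID' padding:
#   h_out = w_out = ceil((5 - 3 + 1) / 1) = 3
#   x_col has shape (channels * h_filter * w_filter, h_out * w_out * n_x) = (9, 9)
#   w_col has shape (out_filters, 9) = (1, 9), so the matmul yields (1, 9),
#   which is reshaped to (1, 3, 3, 1) and transposed back to NCHW as (1, 1, 3, 3).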
| 30.787037 | 77 | 0.63218 | ["Apache-2.0"] | Arash-Afshar/tf-encrypted | tf_encrypted/tensor/shared.py | 3,325 | Python |
import sys
import utils
from statistics import Statistics
from connect import connect
import printer
from configloader import ConfigLoader
from rafflehandler import Rafflehandler
import rafflehandler
import OnlineHeart
import asyncio
from cmd import Cmd
def fetch_real_roomid(roomid):
if roomid:
real_roomid = [[roomid], utils.check_room]
else:
real_roomid = ConfigLoader().dic_user['other_control']['default_monitor_roomid']
return real_roomid
class Biliconsole(Cmd):
prompt = ''
def __init__(self, loop):
self.loop = loop
Cmd.__init__(self)
def guide_of_console(self):
print(' ___________________ ')
        print('| Welcome to the console |')
        print('| 1 Print stats for this lottery session |')
        print('| 2 Show stats of currently owned gifts |')
        print('| 3 Show status of owned medals |')
        print('| 4 Fetch basic info of the live account |')
        print('| 5 Check completion of daily tasks |')
        print('| 6 Send a danmu as from the web client |')
        print('| 7 Convert long/short room numbers |')
        print('| 8 Manually send gifts to a given room |')
        print('| 9 Switch the monitored room |')
        print('|10 Toggle danmu printing on/off |')
        print('|11 Look up the streamer by room number |')
        print('|12 Show currently owned capsule coins |')
        print('|13 Open capsules (1, 10 or 100) |')
        print('|16 Try one physical-prize lottery draw |')
print('  ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ̄ ')
def default(self, line):
self.guide_of_console()
def emptyline(self):
self.guide_of_console()
def do_1(self, line):
Statistics.getlist()
def do_2(self, line):
self.append2list_console(utils.fetch_bag_list)
def do_3(self, line):
self.append2list_console(utils.fetch_medal)
def do_4(self, line):
self.append2list_console(utils.fetch_user_info)
def do_5(self, line):
self.append2list_console(utils.check_taskinfo)
def do_6(self, line):
        msg = input('Enter the message to send:')
        roomid = input('Enter the room number to send to:')
real_roomid = fetch_real_roomid(roomid)
self.append2list_console([[msg, real_roomid], utils.send_danmu_msg_web])
def do_7(self, line):
        roomid = input('Enter the room number to convert:')
if not roomid:
roomid = ConfigLoader().dic_user['other_control']['default_monitor_roomid']
self.append2list_console([[roomid], utils.check_room])
def do_8(self, line):
self.append2list_console([[True], utils.fetch_bag_list])
        bagid = input('Enter the bag ID of the gift to send:')
        # print('who is it', giftid)
        giftnum = int(input('Enter the number of gifts to send:'))
        roomid = input('Enter the room number to send to:')
real_roomid = fetch_real_roomid(roomid)
self.append2list_console([[real_roomid, giftnum, bagid], utils.send_gift_web])
def do_9(self, line):
        roomid = input('Enter the roomid')
real_roomid = fetch_real_roomid(roomid)
self.append2list_console([[real_roomid], connect.reconnect])
def do_10(self, line):
        new_words = input('Danmu printing control')
if new_words == 'T':
printer.control_printer(True, None)
else:
printer.control_printer(False, None)
def do_11(self, line):
        roomid = input('Enter the roomid')
real_roomid = fetch_real_roomid(roomid)
self.append2list_console([[real_roomid], utils.fetch_liveuser_info])
def do_12(self, line):
self.append2list_console(utils.fetch_capsule_info)
def do_13(self, line):
        count = input('Enter the number of capsules to open (1, 10 or 100)')
self.append2list_console([[count], utils.open_capsule])
def do_14(self, line):
if sys.platform == 'ios':
            roomid = input('Enter the roomid')
real_roomid = fetch_real_roomid(roomid)
self.append2list_console([[real_roomid], utils.watch_living_video])
return
        print('Only supported on iOS')
def do_15(self, line):
self.append2list_console(utils.TitleInfo)
def do_16(self, line):
self.append2list_console(OnlineHeart.draw_lottery)
def do_17(self, line):
        new_words = input('Debug printing control')
        if new_words == 'T':
            printer.control_printer(None, True)
        else:
            printer.control_printer(None, False)
def do_18(self, line):
        video_id = input('Enter the av number')
        num = input('Enter the count')
self.append2list_console([[int(video_id), int(num)], utils.GiveCoin2Av])
def do_19(self, line):
try:
            roomid = int(input('Enter the roomid'))
self.append2list_console([[(roomid,), rafflehandler.handle_1_room_guard], rafflehandler.Rafflehandler.Put2Queue_wait])
except:
pass
def do_check(self, line):
Rafflehandler.getlist()
Statistics.checklist()
def append2list_console(self, request):
asyncio.run_coroutine_threadsafe(self.excute_async(request), self.loop)
# inst.loop.call_soon_threadsafe(inst.queue_console.put_nowait, request)
async def excute_async(self, i):
if isinstance(i, list):
            # crude special-case handling for command 10
for j in range(len(i[0])):
if isinstance(i[0][j], list):
                    # print('checking')
i[0][j] = await i[0][j][1](*(i[0][j][0]))
if i[1] == 'normal':
i[2](*i[0])
else:
await i[1](*i[0])
else:
await i()
| 31.94152 | 130 | 0.566459 | ["MIT"] | yjqiang/bilibili-live-tools | bili_console.py | 6,334 | Python |
import unittest
from _2015 import d22_wizard
class TestWizard(unittest.TestCase):
# damage, heal, acBonus, manaRecharge
def test_evaluateSpellEffects_effects_empty(self):
w = d22_wizard.Wizard(0, 0)
activeEffects = {}
effects = w._evaluateSpellEffects(activeEffects)
self.assertEqual([0, 0, 0, 0], effects)
def test_evaluateSpellEffects_activeEffects_empty(self):
w = d22_wizard.Wizard(0, 0)
activeEffects = {}
effects = w._evaluateSpellEffects(activeEffects)
self.assertEqual({}, activeEffects)
def test_evaluateSpellEffects_effects_MagicMissile(self):
w = d22_wizard.Wizard(0, 0)
activeEffects = {"Magic Missile": 1}
effects = w._evaluateSpellEffects(activeEffects)
self.assertEqual([4, 0, 0, 0], effects)
def test_evaluateSpellEffects_activeEffects_MagicMissile(self):
w = d22_wizard.Wizard(0, 0)
activeEffects = {"Magic Missile": 1}
effects = w._evaluateSpellEffects(activeEffects)
self.assertEqual({}, activeEffects)
def test_evaluateSpellEffects_effects_Drain(self):
w = d22_wizard.Wizard(0, 0)
activeEffects = {"Drain": 1}
effects = w._evaluateSpellEffects(activeEffects)
self.assertEqual([2, 2, 0, 0], effects)
def test_evaluateSpellEffects_activeEffects_Drain(self):
w = d22_wizard.Wizard(0, 0)
activeEffects = {"Drain": 1}
effects = w._evaluateSpellEffects(activeEffects)
self.assertEqual({}, activeEffects)
def test_evaluateSpellEffects_effects_Shield(self):
w = d22_wizard.Wizard(0, 0)
duration = 6
activeEffects = {"Shield": duration}
for i in range(duration):
effects = w._evaluateSpellEffects(activeEffects)
self.assertEqual([0, 0, 7, 0], effects)
def test_evaluateSpellEffects_activeEffects_Shield(self):
w = d22_wizard.Wizard(0, 0)
duration = 6
activeEffects = {"Shield": duration}
for i in range(1, duration):
effects = w._evaluateSpellEffects(activeEffects)
self.assertEqual({"Shield": duration - i}, activeEffects, i)
effects = w._evaluateSpellEffects(activeEffects)
self.assertEqual({}, activeEffects)
def test_evaluateSpellEffects_effects_Poison(self):
w = d22_wizard.Wizard(0, 0)
        duration = 6
        activeEffects = {"Poison": duration}
for i in range(duration):
effects = w._evaluateSpellEffects(activeEffects)
self.assertEqual([3, 0, 0, 0], effects)
def test_evaluateSpellEffects_activeEffects_Poison(self):
w = d22_wizard.Wizard(0, 0)
duration = 6
activeEffects = {"Poison": duration}
for i in range(1, duration):
effects = w._evaluateSpellEffects(activeEffects)
self.assertEqual({"Poison": duration - i}, activeEffects, i)
effects = w._evaluateSpellEffects(activeEffects)
self.assertEqual({}, activeEffects)
def test_evaluateSpellEffects_effects_Recharge(self):
w = d22_wizard.Wizard(0, 0)
duration = 5
activeEffects = {"Recharge": duration}
for i in range(duration):
effects = w._evaluateSpellEffects(activeEffects)
self.assertEqual([0, 0, 0, 101], effects)
def test_evaluateSpellEffects_activeEffects_Recharge(self):
w = d22_wizard.Wizard(0, 0)
duration = 5
activeEffects = {"Recharge": duration}
for i in range(1, duration):
effects = w._evaluateSpellEffects(activeEffects)
self.assertEqual({"Recharge": duration - i}, activeEffects, i)
effects = w._evaluateSpellEffects(activeEffects)
self.assertEqual({}, activeEffects)
if __name__ == '__main__':
unittest.main()
| 38.346535 | 74 | 0.653757 | ["Unlicense"] | dcsparkes/adventofcode | test/test_wizard.py | 3,873 | Python |
import numpy as np
np.random.seed(0)
ADOPT = 0
OVERRIDE = 1
WAIT = 2
class Environment(object):
def __init__(self, mining_powers, gammas, T):
# relative mining strengths.
self.mining_powers = mining_powers
self.gammas = gammas
self.num_miners = len(mining_powers)
# termination parameters
self.T = T
# chain variables
self.chain = ''
self.starting_points = np.zeros(self.num_miners, dtype=np.int64)
self.hidden_lengths = np.zeros(self.num_miners, dtype=np.int64)
def reset(self):
self.chain = ''
self.starting_points = np.zeros(self.num_miners, dtype=np.int64)
self.hidden_lengths = np.zeros(self.num_miners, dtype=np.int64)
def getNextBlockWinner(self):
winner = np.random.choice(np.arange(len(self.mining_powers)), p=self.mining_powers)
self.hidden_lengths[winner] += 1
return winner
def adopt(self, player_index):
_a, h = self.getState(player_index)
self.starting_points[player_index] = len(self.chain)
self.hidden_lengths[player_index] = 0
return self.getState(player_index), (0, h)
def wait(self, player_index):
a, h = self.getState(player_index)
if (a == self.T) or (h == self.T):
return self.adopt(player_index)
return self.getState(player_index), (0, 0)
def override(self, player_index):
a, h = self.getState(player_index)
if a <= h:
self.starting_points[player_index] = len(self.chain)
self.hidden_lengths[player_index] = 0
return self.getState(player_index), (0, 10)
# chop chain to proper length
self.chain = self.chain[:self.starting_points[player_index]]
new_blocks = str(player_index) * a
self.chain += new_blocks
self.starting_points[player_index] = len(self.chain)
self.hidden_lengths[player_index] = 0
return self.getState(player_index), (a, 0)
def getState(self, player_index):
return (self.hidden_lengths[player_index], len(self.chain)-self.starting_points[player_index])
def takeActionPlayer(self, player_index, action):
if action == ADOPT:
return self.adopt(player_index)
elif action == OVERRIDE:
return self.override(player_index)
elif action == WAIT:
return self.wait(player_index)
else:
raise KeyError('{} is not an action'.format(action))
if __name__ == "__main__":
powers = [0.55, 0.45]
gammas = [0.5, 0.5]
env = Environment(powers, gammas, T=9)
chain = ''
for _ in range(1000):
chain += str(env.getNextBlockWinner())
print('p0', chain.count('0'), chain.count('0')/len(chain))
print('p1', chain.count('1'), chain.count('1')/len(chain))
print('p2', chain.count('2'), chain.count('2')/len(chain))
| 35.39759 | 102 | 0.62015 | ["MIT"] | michaelneuder/parkes_lab_fa19 | proof_of_work/multiagent/turn_based/v5/environmentv5.py | 2,938 | Python |
"""
Statistical tools for time series analysis
"""
from statsmodels.compat.python import (iteritems, range, lrange, string_types,
lzip, zip, long)
from statsmodels.compat.scipy import _next_regular
import numpy as np
from numpy.linalg import LinAlgError
from scipy import stats
from statsmodels.regression.linear_model import OLS, yule_walker
from statsmodels.tools.tools import add_constant, Bunch
from statsmodels.tsa.tsatools import lagmat, lagmat2ds, add_trend
from statsmodels.tsa.adfvalues import mackinnonp, mackinnoncrit
from statsmodels.tsa._bds import bds
from statsmodels.tsa.arima_model import ARMA
from statsmodels.tools.sm_exceptions import InterpolationWarning, MissingDataError
__all__ = ['acovf', 'acf', 'pacf', 'pacf_yw', 'pacf_ols', 'ccovf', 'ccf',
'periodogram', 'q_stat', 'coint', 'arma_order_select_ic',
'adfuller', 'kpss', 'bds']
SQRTEPS = np.sqrt(np.finfo(np.double).eps)
#NOTE: now in two places to avoid circular import
#TODO: I like the bunch pattern for this too.
class ResultsStore(object):
def __str__(self):
return self._str # pylint: disable=E1101
def _autolag(mod, endog, exog, startlag, maxlag, method, modargs=(),
fitargs=(), regresults=False):
"""
Returns the results for the lag length that maximizes the info criterion.
Parameters
----------
mod : Model class
Model estimator class
endog : array-like
nobs array containing endogenous variable
exog : array-like
nobs by (startlag + maxlag) array containing lags and possibly other
variables
startlag : int
The first zero-indexed column to hold a lag. See Notes.
maxlag : int
The highest lag order for lag length selection.
method : {'aic', 'bic', 't-stat'}
aic - Akaike Information Criterion
bic - Bayes Information Criterion
t-stat - Based on last lag
modargs : tuple, optional
args to pass to model. See notes.
fitargs : tuple, optional
args to pass to fit. See notes.
regresults : bool, optional
Flag indicating to return optional return results
Returns
-------
icbest : float
Best information criteria.
bestlag : int
The lag length that maximizes the information criterion.
results : dict, optional
Dictionary containing all estimation results
Notes
-----
Does estimation like mod(endog, exog[:,:i], *modargs).fit(*fitargs)
where i goes from lagstart to lagstart+maxlag+1. Therefore, lags are
assumed to be in contiguous columns from low to high lag length with
the highest lag in the last column.
"""
#TODO: can tcol be replaced by maxlag + 2?
#TODO: This could be changed to laggedRHS and exog keyword arguments if
# this will be more general.
results = {}
method = method.lower()
for lag in range(startlag, startlag + maxlag + 1):
mod_instance = mod(endog, exog[:, :lag], *modargs)
results[lag] = mod_instance.fit()
if method == "aic":
icbest, bestlag = min((v.aic, k) for k, v in iteritems(results))
elif method == "bic":
icbest, bestlag = min((v.bic, k) for k, v in iteritems(results))
elif method == "t-stat":
#stop = stats.norm.ppf(.95)
stop = 1.6448536269514722
for lag in range(startlag + maxlag, startlag - 1, -1):
icbest = np.abs(results[lag].tvalues[-1])
if np.abs(icbest) >= stop:
bestlag = lag
icbest = icbest
break
else:
        raise ValueError("Information Criterion %s not understood." % method)
if not regresults:
return icbest, bestlag
else:
return icbest, bestlag, results
#this needs to be converted to a class like HetGoldfeldQuandt,
# 3 different returns are a mess
# See:
#Ng and Perron(2001), Lag length selection and the construction of unit root
#tests with good size and power, Econometrica, Vol 69 (6) pp 1519-1554
#TODO: include drift keyword, only valid with regression == "c"
# just changes the distribution of the test statistic to a t distribution
#TODO: autolag is untested
def adfuller(x, maxlag=None, regression="c", autolag='AIC',
store=False, regresults=False):
"""
Augmented Dickey-Fuller unit root test
The Augmented Dickey-Fuller test can be used to test for a unit root in a
univariate process in the presence of serial correlation.
Parameters
----------
x : array_like, 1d
data series
maxlag : int
Maximum lag which is included in test, default 12*(nobs/100)^{1/4}
regression : {'c','ct','ctt','nc'}
Constant and trend order to include in regression
* 'c' : constant only (default)
* 'ct' : constant and trend
* 'ctt' : constant, and linear and quadratic trend
* 'nc' : no constant, no trend
autolag : {'AIC', 'BIC', 't-stat', None}
* if None, then maxlag lags are used
* if 'AIC' (default) or 'BIC', then the number of lags is chosen
to minimize the corresponding information criterion
* 't-stat' based choice of maxlag. Starts with maxlag and drops a
lag until the t-statistic on the last lag length is significant
using a 5%-sized test
store : bool
If True, then a result instance is returned additionally to
the adf statistic. Default is False
regresults : bool, optional
If True, the full regression results are returned. Default is False
Returns
-------
adf : float
Test statistic
pvalue : float
MacKinnon's approximate p-value based on MacKinnon (1994, 2010)
usedlag : int
Number of lags used
nobs : int
Number of observations used for the ADF regression and calculation of
the critical values
critical values : dict
Critical values for the test statistic at the 1 %, 5 %, and 10 %
levels. Based on MacKinnon (2010)
icbest : float
The maximized information criterion if autolag is not None.
resstore : ResultStore, optional
A dummy class with results attached as attributes
Notes
-----
The null hypothesis of the Augmented Dickey-Fuller is that there is a unit
root, with the alternative that there is no unit root. If the pvalue is
above a critical size, then we cannot reject that there is a unit root.
The p-values are obtained through regression surface approximation from
MacKinnon 1994, but using the updated 2010 tables. If the p-value is close
to significant, then the critical values should be used to judge whether
to reject the null.
The autolag option and maxlag for it are described in Greene.
Examples
--------
See example notebook
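    A minimal sketch (synthetic data, for illustration only)::
        import numpy as np
        from statsmodels.tsa.stattools import adfuller
        np.random.seed(12345)
        x = np.cumsum(np.random.randn(250))   # a random walk has a unit root
        adf_stat, pvalue, usedlag, nobs, crit, icbest = adfuller(x)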
References
----------
.. [*] W. Green. "Econometric Analysis," 5th ed., Pearson, 2003.
.. [*] Hamilton, J.D. "Time Series Analysis". Princeton, 1994.
.. [*] MacKinnon, J.G. 1994. "Approximate asymptotic distribution functions for
unit-root and cointegration tests. `Journal of Business and Economic
Statistics` 12, 167-76.
.. [*] MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests." Queen's
University, Dept of Economics, Working Papers. Available at
http://ideas.repec.org/p/qed/wpaper/1227.html
"""
if regresults:
store = True
trenddict = {None: 'nc', 0: 'c', 1: 'ct', 2: 'ctt'}
if regression is None or isinstance(regression, (int, long)):
regression = trenddict[regression]
regression = regression.lower()
if regression not in ['c', 'nc', 'ct', 'ctt']:
        raise ValueError("regression option %s not understood" % regression)
x = np.asarray(x)
nobs = x.shape[0]
if maxlag is None:
#from Greene referencing Schwert 1989
maxlag = int(np.ceil(12. * np.power(nobs / 100., 1 / 4.)))
xdiff = np.diff(x)
xdall = lagmat(xdiff[:, None], maxlag, trim='both', original='in')
nobs = xdall.shape[0] # pylint: disable=E1103
xdall[:, 0] = x[-nobs - 1:-1] # replace 0 xdiff with level of x
xdshort = xdiff[-nobs:]
if store:
resstore = ResultsStore()
if autolag:
if regression != 'nc':
fullRHS = add_trend(xdall, regression, prepend=True)
else:
fullRHS = xdall
startlag = fullRHS.shape[1] - xdall.shape[1] + 1 # 1 for level # pylint: disable=E1103
#search for lag length with smallest information criteria
#Note: use the same number of observations to have comparable IC
#aic and bic: smaller is better
if not regresults:
icbest, bestlag = _autolag(OLS, xdshort, fullRHS, startlag,
maxlag, autolag)
else:
icbest, bestlag, alres = _autolag(OLS, xdshort, fullRHS, startlag,
maxlag, autolag,
regresults=regresults)
resstore.autolag_results = alres
bestlag -= startlag # convert to lag not column index
#rerun ols with best autolag
xdall = lagmat(xdiff[:, None], bestlag, trim='both', original='in')
nobs = xdall.shape[0] # pylint: disable=E1103
xdall[:, 0] = x[-nobs - 1:-1] # replace 0 xdiff with level of x
xdshort = xdiff[-nobs:]
usedlag = bestlag
else:
usedlag = maxlag
icbest = None
if regression != 'nc':
resols = OLS(xdshort, add_trend(xdall[:, :usedlag + 1],
regression)).fit()
else:
resols = OLS(xdshort, xdall[:, :usedlag + 1]).fit()
adfstat = resols.tvalues[0]
# adfstat = (resols.params[0]-1.0)/resols.bse[0]
# the "asymptotically correct" z statistic is obtained as
# nobs/(1-np.sum(resols.params[1:-(trendorder+1)])) (resols.params[0] - 1)
# I think this is the statistic that is used for series that are integrated
# for orders higher than I(1), ie., not ADF but cointegration tests.
# Get approx p-value and critical values
pvalue = mackinnonp(adfstat, regression=regression, N=1)
critvalues = mackinnoncrit(N=1, regression=regression, nobs=nobs)
critvalues = {"1%" : critvalues[0], "5%" : critvalues[1],
"10%" : critvalues[2]}
if store:
resstore.resols = resols
resstore.maxlag = maxlag
resstore.usedlag = usedlag
resstore.adfstat = adfstat
resstore.critvalues = critvalues
resstore.nobs = nobs
resstore.H0 = ("The coefficient on the lagged level equals 1 - "
"unit root")
resstore.HA = "The coefficient on the lagged level < 1 - stationary"
resstore.icbest = icbest
resstore._str = 'Augmented Dickey-Fuller Test Results'
return adfstat, pvalue, critvalues, resstore
else:
if not autolag:
return adfstat, pvalue, usedlag, nobs, critvalues
else:
return adfstat, pvalue, usedlag, nobs, critvalues, icbest
def acovf(x, unbiased=False, demean=True, fft=False, missing='none'):
"""
Autocovariance for 1D
Parameters
----------
x : array
Time series data. Must be 1d.
unbiased : bool
If True, then denominators is n-k, otherwise n
demean : bool
If True, then subtract the mean x from each element of x
fft : bool
If True, use FFT convolution. This method should be preferred
for long time series.
missing : str
A string in ['none', 'raise', 'conservative', 'drop'] specifying how the NaNs
are to be treated.
Returns
-------
acovf : array
autocovariance function
References
-----------
.. [*] Parzen, E., 1963. On spectral analysis with missing observations
and amplitude modulation. Sankhya: The Indian Journal of
Statistics, Series A, pp.383-392.
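    Examples
    --------
    A minimal sketch (synthetic data, for illustration only)::
        import numpy as np
        from statsmodels.tsa.stattools import acovf
        x = np.random.randn(100)
        gamma = acovf(x, fft=True)   # gamma[0] is the biased sample variance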
"""
x = np.squeeze(np.asarray(x))
if x.ndim > 1:
raise ValueError("x must be 1d. Got %d dims." % x.ndim)
missing = missing.lower()
if missing not in ['none', 'raise', 'conservative', 'drop']:
raise ValueError("missing option %s not understood" % missing)
if missing == 'none':
deal_with_masked = False
else:
deal_with_masked = has_missing(x)
if deal_with_masked:
if missing == 'raise':
raise MissingDataError("NaNs were encountered in the data")
notmask_bool = ~np.isnan(x) #bool
if missing == 'conservative':
x[~notmask_bool] = 0
else: #'drop'
x = x[notmask_bool] #copies non-missing
notmask_int = notmask_bool.astype(int) #int
if demean and deal_with_masked:
# whether 'drop' or 'conservative':
xo = x - x.sum()/notmask_int.sum()
if missing=='conservative':
xo[~notmask_bool] = 0
elif demean:
xo = x - x.mean()
else:
xo = x
n = len(x)
if unbiased and deal_with_masked and missing=='conservative':
d = np.correlate(notmask_int, notmask_int, 'full')
elif unbiased:
xi = np.arange(1, n + 1)
d = np.hstack((xi, xi[:-1][::-1]))
elif deal_with_masked: #biased and NaNs given and ('drop' or 'conservative')
d = notmask_int.sum() * np.ones(2*n-1)
else: #biased and no NaNs or missing=='none'
d = n * np.ones(2 * n - 1)
if fft:
nobs = len(xo)
n = _next_regular(2 * nobs + 1)
Frf = np.fft.fft(xo, n=n)
acov = np.fft.ifft(Frf * np.conjugate(Frf))[:nobs] / d[nobs - 1:]
acov = acov.real
else:
acov = (np.correlate(xo, xo, 'full') / d)[n - 1:]
if deal_with_masked and missing=='conservative':
# restore data for the user
x[~notmask_bool] = np.nan
return acov
def q_stat(x, nobs, type="ljungbox"):
"""
    Returns the Ljung-Box Q statistic
    Parameters
    ----------
    x : array-like
Array of autocorrelation coefficients. Can be obtained from acf.
nobs : int
Number of observations in the entire sample (ie., not just the length
of the autocorrelation function results.
Returns
-------
q-stat : array
Ljung-Box Q-statistic for autocorrelation parameters
p-value : array
P-value of the Q statistic
Notes
------
Written to be used with acf.
"""
x = np.asarray(x)
if type == "ljungbox":
ret = (nobs * (nobs + 2) *
np.cumsum((1. / (nobs - np.arange(1, len(x) + 1))) * x**2))
chi2 = stats.chi2.sf(ret, np.arange(1, len(x) + 1))
return ret, chi2
#NOTE: Changed unbiased to False
#see for example
# http://www.itl.nist.gov/div898/handbook/eda/section3/autocopl.htm
def acf(x, unbiased=False, nlags=40, qstat=False, fft=False, alpha=None,
missing='none'):
"""
Autocorrelation function for 1d arrays.
Parameters
----------
x : array
Time series data
unbiased : bool
If True, then denominators for autocovariance are n-k, otherwise n
nlags: int, optional
Number of lags to return autocorrelation for.
qstat : bool, optional
If True, returns the Ljung-Box q statistic for each autocorrelation
coefficient. See q_stat for more information.
fft : bool, optional
If True, computes the ACF via FFT.
alpha : scalar, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
Bartlett\'s formula.
missing : str, optional
A string in ['none', 'raise', 'conservative', 'drop'] specifying how the NaNs
are to be treated.
Returns
-------
acf : array
autocorrelation function
confint : array, optional
Confidence intervals for the ACF. Returned if confint is not None.
qstat : array, optional
The Ljung-Box Q-Statistic. Returned if q_stat is True.
pvalues : array, optional
The p-values associated with the Q-statistics. Returned if q_stat is
True.
Notes
-----
The acf at lag 0 (ie., 1) is returned.
This is based np.correlate which does full convolution. For very long time
series it is recommended to use fft convolution instead.
If unbiased is true, the denominator for the autocovariance is adjusted
    but the autocorrelation is not an unbiased estimator.
References
----------
.. [*] Parzen, E., 1963. On spectral analysis with missing observations
and amplitude modulation. Sankhya: The Indian Journal of
Statistics, Series A, pp.383-392.
"""
nobs = len(x) # should this shrink for missing='drop' and NaNs in x?
avf = acovf(x, unbiased=unbiased, demean=True, fft=fft, missing=missing)
acf = avf[:nlags + 1] / avf[0]
if not (qstat or alpha):
return acf
if alpha is not None:
varacf = np.ones(nlags + 1) / nobs
varacf[0] = 0
varacf[1] = 1. / nobs
varacf[2:] *= 1 + 2 * np.cumsum(acf[1:-1]**2)
interval = stats.norm.ppf(1 - alpha / 2.) * np.sqrt(varacf)
confint = np.array(lzip(acf - interval, acf + interval))
if not qstat:
return acf, confint
if qstat:
qstat, pvalue = q_stat(acf[1:], nobs=nobs) # drop lag 0
if alpha is not None:
return acf, confint, qstat, pvalue
else:
return acf, qstat, pvalue
def pacf_yw(x, nlags=40, method='unbiased'):
'''Partial autocorrelation estimated with non-recursive yule_walker
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
nlags : int
largest lag for which pacf is returned
method : 'unbiased' (default) or 'mle'
method for the autocovariance calculations in yule walker
Returns
-------
pacf : 1d array
partial autocorrelations, maxlag+1 elements
Notes
-----
This solves yule_walker for each desired lag and contains
currently duplicate calculations.
'''
pacf = [1.]
for k in range(1, nlags + 1):
pacf.append(yule_walker(x, k, method=method)[0][-1])
return np.array(pacf)
#NOTE: this is incorrect.
def pacf_ols(x, nlags=40):
'''Calculate partial autocorrelations
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
nlags : int
Number of lags for which pacf is returned. Lag 0 is not returned.
Returns
-------
pacf : 1d array
partial autocorrelations, maxlag+1 elements
Notes
-----
This solves a separate OLS estimation for each desired lag.
'''
#TODO: add warnings for Yule-Walker
#NOTE: demeaning and not using a constant gave incorrect answers?
#JP: demeaning should have a better estimate of the constant
#maybe we can compare small sample properties with a MonteCarlo
xlags, x0 = lagmat(x, nlags, original='sep')
#xlags = sm.add_constant(lagmat(x, nlags), prepend=True)
xlags = add_constant(xlags)
pacf = [1.]
for k in range(1, nlags+1):
res = OLS(x0[k:], xlags[k:, :k+1]).fit()
#np.take(xlags[k:], range(1,k+1)+[-1],
pacf.append(res.params[-1])
return np.array(pacf)
def pacf(x, nlags=40, method='ywunbiased', alpha=None):
"""
Partial autocorrelation estimated
Parameters
----------
x : 1d array
observations of time series for which pacf is calculated
nlags : int
largest lag for which pacf is returned
method : {'ywunbiased', 'ywmle', 'ols'}
specifies which method for the calculations to use:
- yw or ywunbiased : yule walker with bias correction in denominator
for acovf. Default.
- ywm or ywmle : yule walker without bias correction
- ols - regression of time series on lags of it and on constant
- ld or ldunbiased : Levinson-Durbin recursion with bias correction
- ldb or ldbiased : Levinson-Durbin recursion without bias correction
alpha : float, optional
If a number is given, the confidence intervals for the given level are
returned. For instance if alpha=.05, 95 % confidence intervals are
returned where the standard deviation is computed according to
1/sqrt(len(x))
Returns
-------
pacf : 1d array
partial autocorrelations, nlags elements, including lag zero
confint : array, optional
Confidence intervals for the PACF. Returned if confint is not None.
Notes
-----
This solves yule_walker equations or ols for each desired lag
and contains currently duplicate calculations.
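    Examples
    --------
    A minimal sketch (synthetic AR(1) data, for illustration only)::
        import numpy as np
        from statsmodels.tsa.stattools import pacf
        np.random.seed(1)
        e = np.random.randn(300)
        y = np.zeros(300)
        for t in range(1, 300):
            y[t] = 0.6 * y[t - 1] + e[t]
        partial = pacf(y, nlags=10, method='ywunbiased')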
"""
if method == 'ols':
ret = pacf_ols(x, nlags=nlags)
elif method in ['yw', 'ywu', 'ywunbiased', 'yw_unbiased']:
ret = pacf_yw(x, nlags=nlags, method='unbiased')
elif method in ['ywm', 'ywmle', 'yw_mle']:
ret = pacf_yw(x, nlags=nlags, method='mle')
    elif method in ['ld', 'ldu', 'ldunbiase', 'ldunbiased', 'ld_unbiased']:
acv = acovf(x, unbiased=True)
ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
#print 'ld', ld_
ret = ld_[2]
# inconsistent naming with ywmle
elif method in ['ldb', 'ldbiased', 'ld_biased']:
acv = acovf(x, unbiased=False)
ld_ = levinson_durbin(acv, nlags=nlags, isacov=True)
ret = ld_[2]
else:
raise ValueError('method not available')
if alpha is not None:
varacf = 1. / len(x) # for all lags >=1
interval = stats.norm.ppf(1. - alpha / 2.) * np.sqrt(varacf)
confint = np.array(lzip(ret - interval, ret + interval))
confint[0] = ret[0] # fix confidence interval for lag 0 to varpacf=0
return ret, confint
else:
return ret
def ccovf(x, y, unbiased=True, demean=True):
''' crosscovariance for 1D
Parameters
----------
x, y : arrays
time series data
unbiased : boolean
if True, then denominators is n-k, otherwise n
Returns
-------
ccovf : array
autocovariance function
Notes
-----
This uses np.correlate which does full convolution. For very long time
series it is recommended to use fft convolution instead.
'''
n = len(x)
if demean:
xo = x - x.mean()
yo = y - y.mean()
else:
xo = x
yo = y
if unbiased:
xi = np.ones(n)
d = np.correlate(xi, xi, 'full')
else:
d = n
return (np.correlate(xo, yo, 'full') / d)[n - 1:]
def ccf(x, y, unbiased=True):
'''cross-correlation function for 1d
Parameters
----------
x, y : arrays
time series data
unbiased : boolean
if True, then denominators for autocovariance is n-k, otherwise n
Returns
-------
ccf : array
cross-correlation function of x and y
Notes
-----
This is based np.correlate which does full convolution. For very long time
series it is recommended to use fft convolution instead.
If unbiased is true, the denominator for the autocovariance is adjusted
    but the autocorrelation is not an unbiased estimator.
'''
cvf = ccovf(x, y, unbiased=unbiased, demean=True)
return cvf / (np.std(x) * np.std(y))
def periodogram(X):
"""
Returns the periodogram for the natural frequency of X
Parameters
----------
X : array-like
Array for which the periodogram is desired.
Returns
-------
pgram : array
1./len(X) * np.abs(np.fft.fft(X))**2
References
----------
Brockwell and Davis.
"""
X = np.asarray(X)
#if kernel == "bartlett":
# w = 1 - np.arange(M+1.)/M #JP removed integer division
pergr = 1. / len(X) * np.abs(np.fft.fft(X))**2
pergr[0] = 0. # what are the implications of this?
return pergr
#copied from nitime and statsmodels\sandbox\tsa\examples\try_ld_nitime.py
#TODO: check what to return, for testing and trying out returns everything
def levinson_durbin(s, nlags=10, isacov=False):
'''Levinson-Durbin recursion for autoregressive processes
Parameters
----------
s : array_like
If isacov is False, then this is the time series. If iasacov is true
then this is interpreted as autocovariance starting with lag 0
nlags : integer
largest lag to include in recursion or order of the autoregressive
process
isacov : boolean
flag to indicate whether the first argument, s, contains the
autocovariances or the data series.
Returns
-------
sigma_v : float
estimate of the error variance ?
arcoefs : ndarray
estimate of the autoregressive coefficients
pacf : ndarray
partial autocorrelation function
sigma : ndarray
entire sigma array from intermediate result, last value is sigma_v
phi : ndarray
entire phi array from intermediate result, last column contains
autoregressive coefficients for AR(nlags) with a leading 1
Notes
-----
This function returns currently all results, but maybe we drop sigma and
phi from the returns.
If this function is called with the time series (isacov=False), then the
sample autocovariance function is calculated with the default options
(biased, no fft).
'''
s = np.asarray(s)
order = nlags # rename compared to nitime
#from nitime
##if sxx is not None and type(sxx) == np.ndarray:
## sxx_m = sxx[:order+1]
##else:
## sxx_m = ut.autocov(s)[:order+1]
if isacov:
sxx_m = s
else:
sxx_m = acovf(s)[:order + 1] # not tested
phi = np.zeros((order + 1, order + 1), 'd')
sig = np.zeros(order + 1)
# initial points for the recursion
phi[1, 1] = sxx_m[1] / sxx_m[0]
sig[1] = sxx_m[0] - phi[1, 1] * sxx_m[1]
for k in range(2, order + 1):
phi[k, k] = (sxx_m[k] - np.dot(phi[1:k, k-1],
sxx_m[1:k][::-1])) / sig[k-1]
for j in range(1, k):
phi[j, k] = phi[j, k-1] - phi[k, k] * phi[k-j, k-1]
sig[k] = sig[k-1] * (1 - phi[k, k]**2)
sigma_v = sig[-1]
arcoefs = phi[1:, -1]
pacf_ = np.diag(phi).copy()
pacf_[0] = 1.
return sigma_v, arcoefs, pacf_, sig, phi # return everything
def grangercausalitytests(x, maxlag, addconst=True, verbose=True):
"""four tests for granger non causality of 2 timeseries
all four tests give similar results
`params_ftest` and `ssr_ftest` are equivalent based on F test which is
identical to lmtest:grangertest in R
Parameters
----------
x : array, 2d
data for test whether the time series in the second column Granger
causes the time series in the first column
maxlag : integer
the Granger causality test results are calculated for all lags up to
maxlag
verbose : bool
print results if true
Returns
-------
results : dictionary
all test results, dictionary keys are the number of lags. For each
lag the values are a tuple, with the first element a dictionary with
teststatistic, pvalues, degrees of freedom, the second element are
the OLS estimation results for the restricted model, the unrestricted
model and the restriction (contrast) matrix for the parameter f_test.
Notes
-----
TODO: convert to class and attach results properly
The Null hypothesis for grangercausalitytests is that the time series in
the second column, x2, does NOT Granger cause the time series in the first
column, x1. Grange causality means that past values of x2 have a
statistically significant effect on the current value of x1, taking past
values of x1 into account as regressors. We reject the null hypothesis
that x2 does not Granger cause x1 if the pvalues are below a desired size
of the test.
The null hypothesis for all four test is that the coefficients
corresponding to past values of the second time series are zero.
'params_ftest', 'ssr_ftest' are based on F distribution
'ssr_chi2test', 'lrtest' are based on chi-square distribution
References
----------
http://en.wikipedia.org/wiki/Granger_causality
Greene: Econometric Analysis
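    Examples
    --------
    A minimal sketch (synthetic data, for illustration only)::
        import numpy as np
        from statsmodels.tsa.stattools import grangercausalitytests
        np.random.seed(0)
        x = np.random.randn(200, 2)
        x[1:, 0] += 0.5 * x[:-1, 1]   # lagged column 2 helps predict column 1
        res = grangercausalitytests(x, maxlag=2, verbose=False)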
"""
from scipy import stats
x = np.asarray(x)
if x.shape[0] <= 3 * maxlag + int(addconst):
raise ValueError("Insufficient observations. Maximum allowable "
"lag is {0}".format(int((x.shape[0] - int(addconst)) /
3) - 1))
resli = {}
for mlg in range(1, maxlag + 1):
result = {}
if verbose:
print('\nGranger Causality')
print('number of lags (no zero)', mlg)
mxlg = mlg
# create lagmat of both time series
dta = lagmat2ds(x, mxlg, trim='both', dropex=1)
#add constant
if addconst:
dtaown = add_constant(dta[:, 1:(mxlg + 1)], prepend=False)
dtajoint = add_constant(dta[:, 1:], prepend=False)
else:
raise NotImplementedError('Not Implemented')
#dtaown = dta[:, 1:mxlg]
#dtajoint = dta[:, 1:]
# Run ols on both models without and with lags of second variable
res2down = OLS(dta[:, 0], dtaown).fit()
res2djoint = OLS(dta[:, 0], dtajoint).fit()
#print results
#for ssr based tests see:
#http://support.sas.com/rnd/app/examples/ets/granger/index.htm
#the other tests are made-up
# Granger Causality test using ssr (F statistic)
fgc1 = ((res2down.ssr - res2djoint.ssr) /
res2djoint.ssr / mxlg * res2djoint.df_resid)
if verbose:
print('ssr based F test: F=%-8.4f, p=%-8.4f, df_denom=%d,'
' df_num=%d' % (fgc1,
stats.f.sf(fgc1, mxlg,
res2djoint.df_resid),
res2djoint.df_resid, mxlg))
result['ssr_ftest'] = (fgc1,
stats.f.sf(fgc1, mxlg, res2djoint.df_resid),
res2djoint.df_resid, mxlg)
        # Granger Causality test using ssr (chi2 statistic)
fgc2 = res2down.nobs * (res2down.ssr - res2djoint.ssr) / res2djoint.ssr
if verbose:
print('ssr based chi2 test: chi2=%-8.4f, p=%-8.4f, '
'df=%d' % (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg))
result['ssr_chi2test'] = (fgc2, stats.chi2.sf(fgc2, mxlg), mxlg)
#likelihood ratio test pvalue:
lr = -2 * (res2down.llf - res2djoint.llf)
if verbose:
print('likelihood ratio test: chi2=%-8.4f, p=%-8.4f, df=%d' %
(lr, stats.chi2.sf(lr, mxlg), mxlg))
result['lrtest'] = (lr, stats.chi2.sf(lr, mxlg), mxlg)
# F test that all lag coefficients of exog are zero
rconstr = np.column_stack((np.zeros((mxlg, mxlg)),
np.eye(mxlg, mxlg),
np.zeros((mxlg, 1))))
ftres = res2djoint.f_test(rconstr)
if verbose:
print('parameter F test: F=%-8.4f, p=%-8.4f, df_denom=%d,'
' df_num=%d' % (ftres.fvalue, ftres.pvalue, ftres.df_denom,
ftres.df_num))
result['params_ftest'] = (np.squeeze(ftres.fvalue)[()],
np.squeeze(ftres.pvalue)[()],
ftres.df_denom, ftres.df_num)
resli[mxlg] = (result, [res2down, res2djoint, rconstr])
return resli
def coint(y0, y1, trend='c', method='aeg', maxlag=None, autolag='aic',
return_results=None):
"""Test for no-cointegration of a univariate equation
The null hypothesis is no cointegration. Variables in y0 and y1 are
assumed to be integrated of order 1, I(1).
This uses the augmented Engle-Granger two-step cointegration test.
Constant or trend is included in 1st stage regression, i.e. in
cointegrating equation.
**Warning:** The autolag default has changed compared to statsmodels 0.8.
    In 0.8 autolag was always None, now the keyword is used and defaults to
'aic'. Use `autolag=None` to avoid the lag search.
Parameters
----------
    y0 : array_like, 1d
        first element in cointegrating vector
    y1 : array_like
        remaining elements in cointegrating vector
trend : str {'c', 'ct'}
trend term included in regression for cointegrating equation
* 'c' : constant
* 'ct' : constant and linear trend
* also available quadratic trend 'ctt', and no constant 'nc'
method : string
currently only 'aeg' for augmented Engle-Granger test is available.
default might change.
maxlag : None or int
keyword for `adfuller`, largest or given number of lags
autolag : string
keyword for `adfuller`, lag selection criterion.
* if None, then maxlag lags are used without lag search
* if 'AIC' (default) or 'BIC', then the number of lags is chosen
to minimize the corresponding information criterion
* 't-stat' based choice of maxlag. Starts with maxlag and drops a
lag until the t-statistic on the last lag length is significant
using a 5%-sized test
return_results : bool
for future compatibility, currently only tuple available.
If True, then a results instance is returned. Otherwise, a tuple
with the test outcome is returned.
Set `return_results=False` to avoid future changes in return.
Returns
-------
coint_t : float
t-statistic of unit-root test on residuals
pvalue : float
MacKinnon's approximate, asymptotic p-value based on MacKinnon (1994)
crit_value : dict
Critical values for the test statistic at the 1 %, 5 %, and 10 %
levels based on regression curve. This depends on the number of
observations.
Notes
-----
    The null hypothesis is that there is no cointegration; the alternative
    hypothesis is that there is a cointegrating relationship. If the p-value
    is small, below a critical size, then we can reject the hypothesis that
    there is no cointegrating relationship.
P-values and critical values are obtained through regression surface
approximation from MacKinnon 1994 and 2010.
If the two series are almost perfectly collinear, then computing the
test is numerically unstable. However, the two series will be cointegrated
under the maintained assumption that they are integrated. In this case
the t-statistic will be set to -inf and the pvalue to zero.
TODO: We could handle gaps in data by dropping rows with nans in the
auxiliary regressions. Not implemented yet, currently assumes no nans
and no gaps in time series.
References
----------
MacKinnon, J.G. 1994 "Approximate Asymptotic Distribution Functions for
    Unit-Root and Cointegration Tests." Journal of Business & Economic
Statistics, 12.2, 167-76.
MacKinnon, J.G. 2010. "Critical Values for Cointegration Tests."
Queen's University, Dept of Economics Working Papers 1227.
http://ideas.repec.org/p/qed/wpaper/1227.html
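    Examples
    --------
    A minimal usage sketch on synthetic data (the series below are
    illustrative only, not from an actual dataset):
    >>> import numpy as np
    >>> np.random.seed(0)
    >>> x = np.cumsum(np.random.standard_normal(500))
    >>> y = 2 * x + np.random.standard_normal(500)
    >>> coint_t, pvalue, crit_values = coint(y, x)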
"""
trend = trend.lower()
if trend not in ['c', 'nc', 'ct', 'ctt']:
raise ValueError("trend option %s not understood" % trend)
y0 = np.asarray(y0)
y1 = np.asarray(y1)
if y1.ndim < 2:
y1 = y1[:, None]
nobs, k_vars = y1.shape
k_vars += 1 # add 1 for y0
if trend == 'nc':
xx = y1
else:
xx = add_trend(y1, trend=trend, prepend=False)
res_co = OLS(y0, xx).fit()
if res_co.rsquared < 1 - 100 * SQRTEPS:
res_adf = adfuller(res_co.resid, maxlag=maxlag, autolag=autolag,
regression='nc')
else:
import warnings
        warnings.warn("y0 and y1 are (almost) perfectly collinear. "
                      "Cointegration test is not reliable in this case.")
# Edge case where series are too similar
res_adf = (-np.inf,)
# no constant or trend, see egranger in Stata and MacKinnon
if trend == 'nc':
crit = [np.nan] * 3 # 2010 critical values not available
else:
crit = mackinnoncrit(N=k_vars, regression=trend, nobs=nobs - 1)
# nobs - 1, the -1 is to match egranger in Stata, I don't know why.
# TODO: check nobs or df = nobs - k
pval_asy = mackinnonp(res_adf[0], regression=trend, N=k_vars)
return res_adf[0], pval_asy, crit
def _safe_arma_fit(y, order, model_kw, trend, fit_kw, start_params=None):
try:
return ARMA(y, order=order, **model_kw).fit(disp=0, trend=trend,
start_params=start_params,
**fit_kw)
except LinAlgError:
# SVD convergence failure on badly misspecified models
return
except ValueError as error:
if start_params is not None: # don't recurse again
# user supplied start_params only get one chance
return
# try a little harder, should be handled in fit really
elif ('initial' not in error.args[0] or 'initial' in str(error)):
start_params = [.1] * sum(order)
if trend == 'c':
start_params = [.1] + start_params
return _safe_arma_fit(y, order, model_kw, trend, fit_kw,
start_params)
else:
return
except: # no idea what happened
return
def arma_order_select_ic(y, max_ar=4, max_ma=2, ic='bic', trend='c',
model_kw={}, fit_kw={}):
"""
Returns information criteria for many ARMA models
Parameters
----------
y : array-like
Time-series data
max_ar : int
Maximum number of AR lags to use. Default 4.
max_ma : int
Maximum number of MA lags to use. Default 2.
ic : str, list
Information criteria to report. Either a single string or a list
of different criteria is possible.
trend : str
The trend to use when fitting the ARMA models.
model_kw : dict
Keyword arguments to be passed to the ``ARMA`` model
fit_kw : dict
Keyword arguments to be passed to ``ARMA.fit``.
Returns
-------
obj : Results object
Each ic is an attribute with a DataFrame for the results. The AR order
used is the row index. The ma order used is the column index. The
minimum orders are available as ``ic_min_order``.
Examples
--------
>>> from statsmodels.tsa.arima_process import arma_generate_sample
>>> import statsmodels.api as sm
>>> import numpy as np
>>> arparams = np.array([.75, -.25])
>>> maparams = np.array([.65, .35])
>>> arparams = np.r_[1, -arparams]
    >>> maparams = np.r_[1, maparams]
>>> nobs = 250
>>> np.random.seed(2014)
>>> y = arma_generate_sample(arparams, maparams, nobs)
>>> res = sm.tsa.arma_order_select_ic(y, ic=['aic', 'bic'], trend='nc')
>>> res.aic_min_order
>>> res.bic_min_order
Notes
-----
This method can be used to tentatively identify the order of an ARMA
process, provided that the time series is stationary and invertible. This
    function computes the full exact MLE estimate of each model and can
    therefore be a little slow. An implementation using approximate estimates
will be provided in the future. In the meantime, consider passing
{method : 'css'} to fit_kw.
"""
from pandas import DataFrame
ar_range = lrange(0, max_ar + 1)
ma_range = lrange(0, max_ma + 1)
if isinstance(ic, string_types):
ic = [ic]
elif not isinstance(ic, (list, tuple)):
raise ValueError("Need a list or a tuple for ic if not a string.")
results = np.zeros((len(ic), max_ar + 1, max_ma + 1))
for ar in ar_range:
for ma in ma_range:
if ar == 0 and ma == 0 and trend == 'nc':
results[:, ar, ma] = np.nan
continue
mod = _safe_arma_fit(y, (ar, ma), model_kw, trend, fit_kw)
if mod is None:
results[:, ar, ma] = np.nan
continue
for i, criteria in enumerate(ic):
results[i, ar, ma] = getattr(mod, criteria)
dfs = [DataFrame(res, columns=ma_range, index=ar_range) for res in results]
res = dict(zip(ic, dfs))
# add the minimums to the results dict
min_res = {}
for i, result in iteritems(res):
mins = np.where(result.min().min() == result)
min_res.update({i + '_min_order' : (mins[0][0], mins[1][0])})
res.update(min_res)
return Bunch(**res)
def has_missing(data):
"""
Returns True if 'data' contains missing entries, otherwise False
"""
return np.isnan(np.sum(data))
def kpss(x, regression='c', lags=None, store=False):
"""
Kwiatkowski-Phillips-Schmidt-Shin test for stationarity.
Computes the Kwiatkowski-Phillips-Schmidt-Shin (KPSS) test for the null
hypothesis that x is level or trend stationary.
Parameters
----------
x : array_like, 1d
Data series
regression : str{'c', 'ct'}
Indicates the null hypothesis for the KPSS test
* 'c' : The data is stationary around a constant (default)
* 'ct' : The data is stationary around a trend
lags : int
Indicates the number of lags to be used. If None (default),
        lags is set to int(np.ceil(12 * (n / 100)**(1 / 4))), as outlined in
Schwert (1989).
store : bool
If True, then a result instance is returned additionally to
the KPSS statistic (default is False).
Returns
-------
kpss_stat : float
The KPSS test statistic
p_value : float
The p-value of the test. The p-value is interpolated from
Table 1 in Kwiatkowski et al. (1992), and a boundary point
is returned if the test statistic is outside the table of
critical values, that is, if the p-value is outside the
interval (0.01, 0.1).
lags : int
The truncation lag parameter
crit : dict
The critical values at 10%, 5%, 2.5% and 1%. Based on
Kwiatkowski et al. (1992).
resstore : (optional) instance of ResultStore
An instance of a dummy class with results attached as attributes
Notes
-----
To estimate sigma^2 the Newey-West estimator is used. If lags is None,
    the truncation lag parameter is set to int(np.ceil(12 * (n / 100) ** (1 / 4))),
as outlined in Schwert (1989). The p-values are interpolated from
Table 1 of Kwiatkowski et al. (1992). If the computed statistic is
outside the table of critical values, then a warning message is
generated.
Missing values are not handled.
References
----------
D. Kwiatkowski, P. C. B. Phillips, P. Schmidt, and Y. Shin (1992): Testing
the Null Hypothesis of Stationarity against the Alternative of a Unit Root.
`Journal of Econometrics` 54, 159-178.
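    Examples
    --------
    A minimal usage sketch on a synthetic, level-stationary series (the data
    are illustrative only):
    >>> import numpy as np
    >>> np.random.seed(0)
    >>> x = np.random.standard_normal(250)
    >>> kpss_stat, p_value, lags, crit = kpss(x, regression='c')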
"""
from warnings import warn
nobs = len(x)
x = np.asarray(x)
hypo = regression.lower()
    # only 1-dimensional x is supported: for 2-d input with more than one
    # column, len(x) != x.size
if nobs != x.size:
raise ValueError("x of shape {0} not understood".format(x.shape))
if hypo == 'ct':
# p. 162 Kwiatkowski et al. (1992): y_t = beta * t + r_t + e_t,
# where beta is the trend, r_t a random walk and e_t a stationary
# error term.
resids = OLS(x, add_constant(np.arange(1, nobs + 1))).fit().resid
crit = [0.119, 0.146, 0.176, 0.216]
elif hypo == 'c':
# special case of the model above, where beta = 0 (so the null
# hypothesis is that the data is stationary around r_0).
resids = x - x.mean()
crit = [0.347, 0.463, 0.574, 0.739]
else:
raise ValueError("hypothesis '{0}' not understood".format(hypo))
if lags is None:
# from Kwiatkowski et al. referencing Schwert (1989)
lags = int(np.ceil(12. * np.power(nobs / 100., 1 / 4.)))
pvals = [0.10, 0.05, 0.025, 0.01]
eta = sum(resids.cumsum()**2) / (nobs**2) # eq. 11, p. 165
s_hat = _sigma_est_kpss(resids, nobs, lags)
kpss_stat = eta / s_hat
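    # crit is increasing while pvals is decreasing, so np.interp maps the
    # statistic onto an interpolated p-value; landing on either end of the
    # table triggers the boundary warnings below.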
p_value = np.interp(kpss_stat, crit, pvals)
if p_value == pvals[-1]:
warn("p-value is smaller than the indicated p-value", InterpolationWarning)
elif p_value == pvals[0]:
warn("p-value is greater than the indicated p-value", InterpolationWarning)
crit_dict = {'10%': crit[0], '5%': crit[1], '2.5%': crit[2], '1%': crit[3]}
if store:
rstore = ResultsStore()
rstore.lags = lags
rstore.nobs = nobs
stationary_type = "level" if hypo == 'c' else "trend"
rstore.H0 = "The series is {0} stationary".format(stationary_type)
rstore.HA = "The series is not {0} stationary".format(stationary_type)
return kpss_stat, p_value, crit_dict, rstore
else:
return kpss_stat, p_value, lags, crit_dict
def _sigma_est_kpss(resids, nobs, lags):
"""
Computes equation 10, p. 164 of Kwiatkowski et al. (1992). This is the
consistent estimator for the variance.
"""
s_hat = sum(resids**2)
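    # The loop below adds the Bartlett-weighted autocovariance terms, giving the
    # Newey-West estimator with weights w(s, l) = 1 - s / (l + 1):
    # s^2 = (1/T) * sum_t e_t^2 + (2/T) * sum_{s=1}^{l} w(s, l) * sum_{t=s+1}^{T} e_t * e_{t-s}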
for i in range(1, lags + 1):
resids_prod = np.dot(resids[i:], resids[:nobs - i])
s_hat += 2 * resids_prod * (1. - (i / (lags + 1.)))
return s_hat / nobs
| 35.16705 | 96 | 0.613776 | [
"BSD-3-Clause"
] | josef-pkt/statsmodels | statsmodels/tsa/stattools.py | 45,893 | Python |
"""
ASGI config for ecom project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ecom.settings')
application = get_asgi_application()
| 22.647059 | 78 | 0.781818 | [
"BSD-3-Clause"
] | Limo-v/ecom | ecom/asgi.py | 385 | Python |
"""Tests for the smarttub integration."""
from datetime import timedelta
from openpeerpower.components.smarttub.const import SCAN_INTERVAL
from openpeerpower.util import dt
from tests.common import async_fire_time_changed
async def trigger_update(opp):
"""Trigger a polling update by moving time forward."""
new_time = dt.utcnow() + timedelta(seconds=SCAN_INTERVAL + 1)
async_fire_time_changed(opp, new_time)
await opp.async_block_till_done()
| 29 | 65 | 0.782328 | [
"Apache-2.0"
] | OpenPeerPower/core | tests/components/smarttub/__init__.py | 464 | Python |
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from polyaxon.proxies.schemas.streams.main import get_main_config
from tests.utils import BaseTestCase
@pytest.mark.proxies_mark
class TestStreamsMain(BaseTestCase):
SET_PROXIES_SETTINGS = True
def test_base_config(self):
expected = """
upstream polyaxon {
server unix:/polyaxon/web/polyaxon.sock;
}
server {
include polyaxon/polyaxon.base.conf;
}
""" # noqa
assert get_main_config() == expected
| 27.657895 | 74 | 0.747859 | [
"Apache-2.0"
] | AI-App/Polyaxon | src/tests/test_proxies/test_streams/test_main.py | 1,051 | Python |
"""This file contains utils for BEHAVIOR demo replay checkpoints."""
import json
import os
from igibson.objects.articulated_object import URDFObject
import pybullet as p
from igibson.utils.utils import restoreState
def save_task_relevant_state(env, root_directory, filename="behavior_dump"):
json_path = os.path.join(root_directory, f"{filename}.json")
# Save the dump in a file.
with open(json_path, "w") as f:
json.dump(save_task_relevant_object_and_robot_states(env), f)
def save_sim_urdf_object_state(sim, root_directory, filename="behavior_dump"):
json_path = os.path.join(root_directory, f"{filename}.json")
# Save the dump in a file.
with open(json_path, "w") as f:
json.dump(save_all_scene_object_and_robot_states(sim), f)
def save_checkpoint(simulator, root_directory):
bullet_path = os.path.join(root_directory, "%d.bullet" % simulator.frame_count)
json_path = os.path.join(root_directory, "%d.json" % simulator.frame_count)
# Save the simulation state.
p.saveBullet(bullet_path)
# Save the dump in a file.
with open(json_path, "w") as f:
json.dump(save_internal_states(simulator), f)
return simulator.frame_count
def load_checkpoint(simulator, root_directory, frame):
bullet_path = os.path.join(root_directory, "%d.bullet" % frame)
json_path = os.path.join(root_directory, "%d.json" % frame)
# Restore the simulation state.
# p.restoreState(fileName=bullet_path)
restoreState(fileName=bullet_path)
with open(json_path, "r") as f:
dump = json.load(f)
load_internal_states(simulator, dump)
# NOTE: For all articulated objects, we need to force_wakeup
# for the visuals in the simulator to update
for obj in simulator.scene.get_objects():
if isinstance(obj, URDFObject):
obj.force_wakeup()
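# Example usage (a sketch; assumes `simulator` is an iGibson Simulator with a
# loaded scene and that the checkpoint directory already exists):
#   frame = save_checkpoint(simulator, "/tmp/checkpoints")
#   ... step the simulation ...
#   load_checkpoint(simulator, "/tmp/checkpoints", frame)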
def save_internal_states(simulator):
# Dump the object state.
object_dump = {}
for name, obj in simulator.scene.objects_by_name.items():
object_dump[name] = obj.dump_state()
# Dump the robot state.
robot_dump = []
for robot in simulator.robots:
robot_dump.append(robot.dump_state())
return {"objects": object_dump, "robots": robot_dump}
def load_internal_states(simulator, dump):
# NOTE: sometimes notebooks turn into hardbacks here.
# i.e if you (1) create iGibson BehaviorEnv, (2) save it
# (3) create a new iGibson BehaviorEnv with the same random seed
# and other parameters and (4) try to load the saved values from (2)
# you might see a KeyError for a notebook or hardback, but this is
# simply because creating a new environment in (3) somehow may cause
# some notebooks to be renamed as hardbacks!!!
# Restore the object state.
object_dump = dump["objects"]
for name, obj in simulator.scene.objects_by_name.items():
obj.load_state(object_dump[name])
# Restore the robot state.
robot_dumps = dump["robots"]
for robot, robot_dump in zip(simulator.robots, robot_dumps):
robot.load_state(robot_dump)
def save_task_relevant_object_and_robot_states(env):
# Dump the object state.
object_dump = {}
for obj in env.task_relevant_objects:
object_dump[obj.bddl_object_scope] = {'metadata': obj.metadata, 'asset_path': obj.model_path, 'pose': tuple(obj.get_position_orientation()), 'scale': tuple(obj.scale)}
# Dump the robot state.
robot_dump = []
for robot in env.simulator.robots:
robot_dump.append(robot.dump_state())
return {"objects": object_dump, "robots": robot_dump}
def save_all_scene_object_and_robot_states(sim):
# Dump the object state, but only for objects of type URDFObject
# that are in the sim.
object_dump = {}
for obj in sim.scene.get_objects():
if 'URDFObject' in str(type(obj)):
object_dump[obj.bddl_object_scope] = {'metadata': obj.metadata, 'asset_path': obj.model_path, 'pose': tuple(obj.get_position_orientation()), 'scale': tuple(obj.scale)}
# Dump the robot state.
robot_dump = []
for robot in sim.robots:
robot_dump.append(robot.dump_state())
    return {"objects": object_dump, "robots": robot_dump}
| 36.947368 | 179 | 0.704179 | [
"MIT"
] | Learning-and-Intelligent-Systems/iGibson | igibson/utils/checkpoint_utils.py | 4,212 | Python |
import json
import re
import sys
from argparse import ArgumentParser, ArgumentTypeError
from enum import Enum
from pathlib import Path
from typing import Any, Iterable, List, NewType, Optional, Tuple, Union
import dataclasses
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
def string_to_bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
)
class KGEArgParser(ArgumentParser):
"""
This subclass of `argparse.ArgumentParser` uses type hints on dataclasses to generate arguments.
The class is designed to play well with the native argparse. In particular, you can add more (non-dataclass backed)
arguments to the parser after initialization and you'll get the output back after parsing as an additional
namespace.
Examples:
>>> from toolbox.KGArgsParser import KGEArgParser
>>> # you should defined these: ModelArguments, DataArguments, TrainingArguments
>>> parser = KGEArgParser((ModelArguments, DataArguments, TrainingArguments))
>>> if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
>>> model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
>>> else:
>>> model_args, data_args, training_args = parser.parse_args_into_dataclasses()
"""
dataclass_types: Iterable[DataClassType]
def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
"""
Args:
dataclass_types:
Dataclass type, or list of dataclass types for which we will "fill" instances with the parsed args.
kwargs:
(Optional) Passed to `argparse.ArgumentParser()` in the regular way.
"""
super().__init__(**kwargs)
if dataclasses.is_dataclass(dataclass_types):
dataclass_types = [dataclass_types]
self.dataclass_types = dataclass_types
for dtype in self.dataclass_types:
self._add_dataclass_arguments(dtype)
def _add_dataclass_arguments(self, dtype: DataClassType):
for field in dataclasses.fields(dtype):
if not field.init:
continue
field_name = f"--{field.name}"
kwargs = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type, str):
raise ImportError(
"This implementation is not compatible with Postponed Evaluation of Annotations (PEP 563),"
"which can be opted in from Python 3.7 with `from __future__ import annotations`."
"We will add compatibility when Python 3.9 is released."
)
typestring = str(field.type)
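            # Unwrap Optional[...] around primitive types and List[primitive]
            # annotations so argparse receives the underlying type rather than
            # the typing construct.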
for prim_type in (int, float, str):
for collection in (List,):
if (
typestring == f"typing.Union[{collection[prim_type]}, NoneType]"
or typestring == f"typing.Optional[{collection[prim_type]}]"
):
field.type = collection[prim_type]
if (
typestring == f"typing.Union[{prim_type.__name__}, NoneType]"
or typestring == f"typing.Optional[{prim_type.__name__}]"
):
field.type = prim_type
if isinstance(field.type, type) and issubclass(field.type, Enum):
kwargs["choices"] = [x.value for x in field.type]
kwargs["type"] = type(kwargs["choices"][0])
if field.default is not dataclasses.MISSING:
kwargs["default"] = field.default
else:
kwargs["required"] = True
elif field.type is bool or field.type == Optional[bool]:
if field.default is True:
self.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **kwargs)
# Hack because type=bool in argparse does not behave as we want.
kwargs["type"] = string_to_bool
if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
# Default value is True if we have no default when of type bool.
default = True if field.default is dataclasses.MISSING else field.default
# This is the value that will get picked if we don't include --field_name in any way
kwargs["default"] = default
# This tells argparse we accept 0 or 1 value after --field_name
kwargs["nargs"] = "?"
# This is the value that will get picked if we do --field_name (without value)
kwargs["const"] = True
elif (
hasattr(field.type, "__origin__") and re.search(r"^typing\.List\[(.*)\]$",
str(field.type)) is not None
):
kwargs["nargs"] = "+"
kwargs["type"] = field.type.__args__[0]
assert all(
x == kwargs["type"] for x in field.type.__args__
), f"{field.name} cannot be a List of mixed types"
if field.default_factory is not dataclasses.MISSING:
kwargs["default"] = field.default_factory()
elif field.default is dataclasses.MISSING:
kwargs["required"] = True
else:
kwargs["type"] = field.type
if field.default is not dataclasses.MISSING:
kwargs["default"] = field.default
elif field.default_factory is not dataclasses.MISSING:
kwargs["default"] = field.default_factory()
else:
kwargs["required"] = True
self.add_argument(field_name, **kwargs)
def parse_args_into_dataclasses(
self, args=None, return_remaining_strings=False, look_for_args_file=True, args_filename=None
) -> Tuple[DataClass, ...]:
"""
Parse command-line args into instances of the specified dataclass types.
This relies on argparse's `ArgumentParser.parse_known_args`. See the doc at:
docs.python.org/3.7/library/argparse.html#argparse.ArgumentParser.parse_args
Args:
args:
List of strings to parse. The default is taken from sys.argv. (same as argparse.ArgumentParser)
return_remaining_strings:
If true, also return a list of remaining argument strings.
look_for_args_file:
If true, will look for a ".args" file with the same base name as the entry point script for this
process, and will append its potential content to the command line args.
args_filename:
If not None, will uses this file instead of the ".args" file specified in the previous argument.
Returns:
Tuple consisting of:
- the dataclass instances in the same order as they were passed to the initializer.abspath
- if applicable, an additional namespace for more (non-dataclass backed) arguments added to the parser
after initialization.
- The potential list of remaining argument strings. (same as argparse.ArgumentParser.parse_known_args)
"""
if args_filename or (look_for_args_file and len(sys.argv)):
if args_filename:
args_file = Path(args_filename)
else:
args_file = Path(sys.argv[0]).with_suffix(".args")
if args_file.exists():
fargs = args_file.read_text().split()
args = fargs + args if args is not None else fargs + sys.argv[1:]
# in case of duplicate arguments the first one has precedence
# so we append rather than prepend.
namespace, remaining_args = self.parse_known_args(args=args)
outputs = []
for dtype in self.dataclass_types:
keys = {f.name for f in dataclasses.fields(dtype) if f.init}
inputs = {k: v for k, v in vars(namespace).items() if k in keys}
for k in keys:
delattr(namespace, k)
obj = dtype(**inputs)
outputs.append(obj)
if len(namespace.__dict__) > 0:
# additional namespace.
outputs.append(namespace)
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(f"Some specified arguments are not used by the KGEArgParser: {remaining_args}")
return (*outputs,)
def parse_json_file(self, json_file: str) -> Tuple[DataClass, ...]:
"""
Alternative helper method that does not use `argparse` at all, instead loading a json file and populating the
dataclass types.
"""
data = json.loads(Path(json_file).read_text())
outputs = []
for dtype in self.dataclass_types:
keys = {f.name for f in dataclasses.fields(dtype) if f.init}
inputs = {k: v for k, v in data.items() if k in keys}
obj = dtype(**inputs)
outputs.append(obj)
return (*outputs,)
def parse_dict(self, args: dict) -> Tuple[DataClass, ...]:
"""
Alternative helper method that does not use `argparse` at all, instead uses a dict and populating the dataclass
types.
"""
outputs = []
for dtype in self.dataclass_types:
keys = {f.name for f in dataclasses.fields(dtype) if f.init}
inputs = {k: v for k, v in args.items() if k in keys}
obj = dtype(**inputs)
outputs.append(obj)
return (*outputs,)
# class KGEArgParser:
# """The class implements the argument parser for the pykg2vec.
#
# KGEArgParser defines all the necessary arguments for the global and local
# configuration of all the modules.
#
# Attributes:
# general_group (object): It parses the general arguements used by most of the modules.
# general_hyper_group (object): It parses the arguments for the hyper-parameter tuning.
#
# Examples:
# >>> from toolbox.KGArgs import KGEArgParser
# >>> args = KGEArgParser().get_args()
# """
#
# def __init__(self):
# self.parser = ArgumentParser(description='Knowledge Graph Embedding tunable configs.')
#
# ''' argument group for hyperparameters '''
# self.general_hyper_group = self.parser.add_argument_group('Generic Hyperparameters')
# self.general_hyper_group.add_argument('-lmda', dest='lmbda', default=0.1, type=float,
# help='The lmbda for regularization.')
# self.general_hyper_group.add_argument('-b', dest='batch_size', default=128, type=int,
# help='training batch size')
# self.general_hyper_group.add_argument('-mg', dest='margin', default=0.8, type=float,
# help='Margin to take')
# self.general_hyper_group.add_argument('-opt', dest='optimizer', default='adam', type=str,
# help='optimizer to be used in training.')
# self.general_hyper_group.add_argument('-s', dest='sampling', default='uniform', type=str,
# help='strategy to do negative sampling.')
# self.general_hyper_group.add_argument('-ngr', dest='neg_rate', default=1, type=int,
# help='The number of negative samples generated per positive one.')
# self.general_hyper_group.add_argument('-l', dest='epochs', default=100, type=int,
# help='The total number of Epochs')
# self.general_hyper_group.add_argument('-lr', dest='learning_rate', default=0.01, type=float,
# help='learning rate')
# self.general_hyper_group.add_argument('-k', dest='hidden_size', default=50, type=int,
# help='Hidden embedding size.')
# self.general_hyper_group.add_argument('-km', dest='ent_hidden_size', default=50, type=int,
# help="Hidden embedding size for entities.")
# self.general_hyper_group.add_argument('-kr', dest='rel_hidden_size', default=50, type=int,
# help="Hidden embedding size for relations.")
# self.general_hyper_group.add_argument('-k2', dest='hidden_size_1', default=10, type=int,
# help="Hidden embedding size for relations.")
# self.general_hyper_group.add_argument('-l1', dest='l1_flag', default=True,
# type=lambda x: (str(x).lower() == 'true'),
# help='The flag of using L1 or L2 norm.')
# self.general_hyper_group.add_argument('-al', dest='alpha', default=0.1, type=float,
# help='The alpha used in self-adversarial negative sampling.')
# self.general_hyper_group.add_argument('-fsize', dest='filter_sizes', default=[1, 2, 3], nargs='+', type=int,
# help='Filter sizes to be used in convKB which acts as the widths of the kernals')
# self.general_hyper_group.add_argument('-fnum', dest='num_filters', default=50, type=int,
# help='Filter numbers to be used in convKB and InteractE.')
# self.general_hyper_group.add_argument('-fmd', dest='feature_map_dropout', default=0.2, type=float,
# help='feature map dropout value used in ConvE and InteractE.')
# self.general_hyper_group.add_argument('-idt', dest='input_dropout', default=0.3, type=float,
# help='input dropout value used in ConvE and InteractE.')
# self.general_hyper_group.add_argument('-hdt', dest='hidden_dropout', default=0.3, type=float,
# help='hidden dropout value used in ConvE.')
# self.general_hyper_group.add_argument('-hdt1', dest='hidden_dropout1', default=0.4, type=float,
# help='hidden dropout value used in TuckER.')
# self.general_hyper_group.add_argument('-hdt2', dest='hidden_dropout2', default=0.5, type=float,
# help='hidden dropout value used in TuckER.')
# self.general_hyper_group.add_argument('-lbs', dest='label_smoothing', default=0.1, type=float,
# help='The parameter used in label smoothing.')
# self.general_hyper_group.add_argument('-cmax', dest='cmax', default=0.05, type=float,
# help='The parameter for clipping values for KG2E.')
# self.general_hyper_group.add_argument('-cmin', dest='cmin', default=5.00, type=float,
# help='The parameter for clipping values for KG2E.')
# self.general_hyper_group.add_argument('-fp', dest='feature_permutation', default=1, type=int,
# help='The number of feature permutations for InteractE.')
# self.general_hyper_group.add_argument('-rh', dest='reshape_height', default=20, type=int,
# help='The height of the reshaped matrix for InteractE.')
# self.general_hyper_group.add_argument('-rw', dest='reshape_width', default=10, type=int,
# help='The width of the reshaped matrix for InteractE.')
# self.general_hyper_group.add_argument('-ks', dest='kernel_size', default=9, type=int,
# help='The kernel size to use for InteractE.')
# self.general_hyper_group.add_argument('-ic', dest='in_channels', default=9, type=int,
# help='The kernel size to use for InteractE.')
# self.general_hyper_group.add_argument('-evd', dest='ent_vec_dim', default=200, type=int, help='.')
# self.general_hyper_group.add_argument('-rvd', dest='rel_vec_dim', default=200, type=int, help='.')
#
# # basic configs
# self.general_group = self.parser.add_argument_group('Generic')
# self.general_group.add_argument('-mn', dest='model_name', default='TransE', type=str, help='Name of model')
# self.general_group.add_argument('-db', dest='debug', default=False, type=lambda x: (str(x).lower() == 'true'),
# help='To use debug mode or not.')
# self.general_group.add_argument('-exp', dest='exp', default=False, type=lambda x: (str(x).lower() == 'true'),
# help='Use Experimental setting extracted from original paper. (use Freebase15k by default)')
# self.general_group.add_argument('-ds', dest='dataset_name', default='Freebase15k', type=str,
# help='The dataset name (choice: fb15k/wn18/wn18_rr/yago/fb15k_237/ks/nations/umls)')
# self.general_group.add_argument('-dsp', dest='dataset_path', default=None, type=str,
# help='The path to custom dataset.')
# self.general_group.add_argument('-ld', dest='load_from_data', default=None, type=str,
# help='The path to the pretrained model.')
# self.general_group.add_argument('-sv', dest='save_model', default=True,
# type=lambda x: (str(x).lower() == 'true'), help='Save the model!')
# self.general_group.add_argument('-tn', dest='test_num', default=1000, type=int,
# help='The total number of test triples')
# self.general_group.add_argument('-ts', dest='test_step', default=10, type=int, help='Test every _ epochs')
# self.general_group.add_argument('-t', dest='tmp', default='../intermediate', type=str,
# help='The folder name to store trained parameters.')
# self.general_group.add_argument('-r', dest='result', default='../results', type=str,
# help='The folder name to save the results.')
# self.general_group.add_argument('-fig', dest='figures', default='../figures', type=str,
# help='The folder name to save the figures.')
# self.general_group.add_argument('-plote', dest='plot_embedding', default=False,
# type=lambda x: (str(x).lower() == 'true'), help='Plot the entity only!')
# self.general_group.add_argument('-plot', dest='plot_entity_only', default=False,
# type=lambda x: (str(x).lower() == 'true'), help='Plot the entity only!')
# self.general_group.add_argument('-device', dest='device', default='cpu', type=str, choices=['cpu', 'cuda'],
# help='Device to run pykg2vec (cpu or cuda).')
# self.general_group.add_argument('-npg', dest='num_process_gen', default=2, type=int,
# help='number of processes used in the Generator.')
# self.general_group.add_argument('-hpf', dest='hp_abs_file', default=None, type=str,
# help='The path to the hyperparameter configuration YAML file.')
# self.general_group.add_argument('-ssf', dest='ss_abs_file', default=None, type=str,
# help='The path to the search space configuration YAML file.')
# self.general_group.add_argument('-mt', dest='max_number_trials', default=100, type=int,
# help='The maximum times of trials for bayesian optimizer.')
#
# def get_args(self, args):
# """This function parses the necessary arguments.
#
# This function is called to parse all the necessary arguments.
#
# Returns:
# object: ArgumentParser object.
# """
# return self.parser.parse_args(args)
| 60.105114 | 134 | 0.571773 | [
"Apache-2.0"
] | LinXueyuanStdio/KGE-toolbox | toolbox/KGArgsParser.py | 21,157 | Python |
"""
Class for package details.
"""
import json
from typing import Any, Dict, List
def _log_info(msg: str) -> None:
print(msg)
class PackageAPI:
"""Package API class
This class is used to hold the interface of a given package
being analyzed by doppel. It's comparison operators enable comparison
between interfaces and its standard JSON format allows this comparison
to happen across programming languages.
"""
def __init__(self, pkg_dict: Dict[str, Any]):
"""
Class containing data that describes a package API
:param pkg_dict: A dictionary representation of a
software package, complying with the output format of
doppel-describe.
"""
self._validate_pkg(pkg_dict)
self.pkg_dict = pkg_dict
@classmethod
def from_json(cls, filename: str) -> "PackageAPI":
"""
Instantiate a Package object from a file.
:param filename: Name of the JSON file
that contains the description of the
target package's API.
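        Example (a sketch; assumes ``python_pkg.json`` was produced by
        the doppel-describe tool):
        >>> pkg = PackageAPI.from_json("python_pkg.json")
        >>> pkg.name()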
"""
_log_info(f"Creating package from {filename}")
# read in output of "analyze.*" script
with open(filename, "r") as f:
pkg_dict = json.loads(f.read())
# validate
return cls(pkg_dict)
@staticmethod
def _validate_pkg(pkg_dict: Dict[str, Any]) -> None:
assert isinstance(pkg_dict, dict)
assert pkg_dict["name"] is not None
assert pkg_dict["language"] is not None
assert pkg_dict["functions"] is not None
assert pkg_dict["classes"] is not None
def name(self) -> str:
"""
Get the name of the package.
"""
return self.pkg_dict["name"]
def num_functions(self) -> int:
"""
Get the number of exported functions in the package.
"""
return len(self.function_names())
def function_names(self) -> List[str]:
"""
Get a list with the names of all exported functions
in the package.
"""
return sorted(list(self.pkg_dict["functions"].keys()))
def functions_with_args(self) -> Dict[str, Dict[str, Any]]:
"""
Get a dictionary with all exported functions in the package
and some details describing them.
"""
return self.pkg_dict["functions"]
def num_classes(self) -> int:
"""
Get the number of exported classes in the package.
"""
return len(self.class_names())
def class_names(self) -> List[str]:
"""
Get a list with the names of all exported classes
in the package.
"""
return sorted(list(self.pkg_dict["classes"].keys()))
def public_methods(self, class_name: str) -> List[str]:
"""
Get a list with the names of all public methods for a class.
:param class_name: Name of a class in the package
"""
return sorted(list(self.pkg_dict["classes"][class_name]["public_methods"].keys()))
def public_method_args(self, class_name: str, method_name: str) -> List[str]:
"""
Get a list of arguments for a public method from a class.
:param class_name: Name of a class in the package
:param method-name: Name of the method to get arguments for
"""
return list(self.pkg_dict["classes"][class_name]["public_methods"][method_name]["args"])
| 28.666667 | 96 | 0.611047 | [
"BSD-3-Clause"
] | franklinen/doppel-cli | doppel/PackageAPI.py | 3,440 | Python |
import pytest
from werkzeug.datastructures import MultiDict
from wtforms import Form, validators
from wtforms import BooleanField, StringField
from app.model.components.helpers import form_fields_dict
def _form_factory(form_class):
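    # Returns a helper that builds `form_class` from keyword arguments (wrapped
    # in a werkzeug MultiDict, as WTForms expects for form data) and validates it.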
def _create_form(**kwargs):
form = form_class(MultiDict(kwargs))
form.validate()
return form
return _create_form
@pytest.fixture
def basic_form():
class TestForm(Form):
first_name = StringField(u'First Name', validators=[validators.input_required()])
last_name = StringField(u'Last Name', validators=[])
return _form_factory(TestForm)
@pytest.fixture
def form_with_checkbox():
class TestForm(Form):
first_name = StringField(u'First Name', validators=[validators.input_required()])
i_agree = BooleanField(u'Yes?', validators=[])
return _form_factory(TestForm)
class TestFormFieldsDict:
def test_should_return_value_and_errors(self, basic_form):
form = basic_form(first_name=None, last_name='Musterfrau')
props = form_fields_dict(form)
assert props == {
'first_name': {
'value': '',
'errors': ['This field is required.']
},
'last_name': {
'value': 'Musterfrau',
'errors': []
}
}
@pytest.mark.parametrize('checked', [True, False])
def test_checkboxes_should_return_checked_and_errors(self, form_with_checkbox, checked):
form = form_with_checkbox(first_name='Erika', i_agree=checked)
props = form_fields_dict(form)
assert props == {
'first_name': {
'value': 'Erika',
'errors': []
},
'i_agree': {
'checked': checked,
'errors': []
}
}
| 26.927536 | 92 | 0.606028 | [
"MIT"
] | digitalservice4germany/steuerlotse | webapp/tests/model/components/test_helpers.py | 1,858 | Python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django import forms
from django.contrib.sites.models import Site
from django.core.exceptions import PermissionDenied
from django.utils.encoding import smart_text
from django.utils.translation import (
ugettext_lazy as _,
get_language,
)
from cms.api import generate_valid_slug
from cms.constants import PAGE_TYPES_ID
from cms.exceptions import NoPermissionsException
from cms.models import Page, Title
from cms.plugin_pool import plugin_pool
from cms.utils import permissions
from cms.utils.page_permissions import (
user_can_add_page,
user_can_add_subpage,
)
from cms.utils.conf import get_cms_setting
from cms.utils.urlutils import static_with_version
try:
# djangocms_text_ckeditor is not guaranteed to be available
from djangocms_text_ckeditor.widgets import TextEditorWidget
text_widget = TextEditorWidget
except ImportError:
text_widget = forms.Textarea
class PageTypeSelect(forms.widgets.Select):
"""
Special widget for the page_type choice-field. This simply adds some JS for
hiding/showing the content field based on the selection of this select.
"""
class Media:
js = (
'cms/js/widgets/wizard.pagetypeselect.js',
)
class SlugWidget(forms.widgets.TextInput):
"""
    Special widget for the slug field that requires the Title field to be present.
    Adds the js for slugifying.
"""
class Media:
js = (
'admin/js/urlify.js',
static_with_version('cms/js/dist/bundle.forms.slugwidget.min.js'),
)
class BaseCMSPageForm(forms.Form):
page = None
title = forms.CharField(
label=_(u'Title'), max_length=255,
help_text=_(u"Provide a title for the new page."))
slug = forms.SlugField(
label=_(u'Slug'), max_length=255, required=False,
help_text=_(u"Leave empty for automatic slug, or override as required."),
widget=SlugWidget()
)
page_type = forms.ChoiceField(
label=_(u'Page type'), required=False, widget=PageTypeSelect())
content = forms.CharField(
label=_(u'Content'), widget=text_widget, required=False,
help_text=_(u"Optional. If supplied, will be automatically added "
u"within a new text plugin."))
def __init__(self, instance=None, *args, **kwargs):
# Expect instance argument here, as we have to accept some of the
# ModelForm __init__() arguments here for the ModelFormMixin cbv
self.instance = instance
super(BaseCMSPageForm, self).__init__(*args, **kwargs)
if self.page:
site = self.page.site_id
else:
site = Site.objects.get_current()
# Either populate, or remove the page_type field
if 'page_type' in self.fields:
root = Page.objects.filter(publisher_is_draft=True,
reverse_id=PAGE_TYPES_ID,
site=site).first()
if root:
page_types = root.get_descendants()
else:
page_types = Page.objects.none()
if root and page_types:
# Set the choicefield's choices to the various page_types
language = get_language()
type_ids = page_types.values_list('pk', flat=True)
titles = Title.objects.filter(page__in=type_ids,
language=language)
choices = [('', '---------')]
for title in titles:
choices.append((title.page_id, title.title))
self.fields['page_type'].choices = choices
else:
# There are no page_types, so don't bother the user with an
# empty choice field.
del self.fields['page_type']
class CreateCMSPageForm(BaseCMSPageForm):
@staticmethod
def get_placeholder(page, slot=None):
"""
Returns the named placeholder or, if no «slot» provided, the first
editable, non-static placeholder or None.
"""
placeholders = page.get_placeholders()
if slot:
placeholders = placeholders.filter(slot=slot)
for ph in placeholders:
if not ph.is_static and ph.is_editable:
return ph
return None
def clean(self):
"""
Validates that either the slug is provided, or that slugification from
`title` produces a valid slug.
:return:
"""
cleaned_data = super(CreateCMSPageForm, self).clean()
slug = cleaned_data.get("slug")
sub_page = cleaned_data.get("sub_page")
title = cleaned_data.get("title")
if self.page:
if sub_page:
parent = self.page
else:
parent = self.page.parent
else:
parent = None
if slug:
starting_point = slug
elif title:
starting_point = title
else:
starting_point = _("page")
slug = generate_valid_slug(starting_point, parent, self.language_code)
if not slug:
raise forms.ValidationError("Please provide a valid slug.")
cleaned_data["slug"] = slug
return cleaned_data
def save(self, **kwargs):
from cms.api import create_page, add_plugin
# Check to see if this user has permissions to make this page. We've
# already checked this when producing a list of wizard entries, but this
# is to prevent people from possible form-hacking.
if 'sub_page' in self.cleaned_data:
sub_page = self.cleaned_data['sub_page']
else:
sub_page = False
if self.page and sub_page:
# User is adding a page which will be a direct
# child of the current page.
position = 'last-child'
parent = self.page
has_perm = user_can_add_subpage(self.user, target=parent)
elif self.page and self.page.parent_id:
# User is adding a page which will be a right
# sibling to the current page.
position = 'last-child'
parent = self.page.parent
has_perm = user_can_add_subpage(self.user, target=parent)
else:
parent = None
position = 'last-child'
has_perm = user_can_add_page(self.user)
if not has_perm:
raise NoPermissionsException(
_(u"User does not have permission to add page."))
page = create_page(
title=self.cleaned_data['title'],
slug=self.cleaned_data['slug'],
template=get_cms_setting('PAGE_WIZARD_DEFAULT_TEMPLATE'),
language=self.language_code,
created_by=smart_text(self.user),
parent=parent,
position=position,
in_navigation=True,
published=False
)
page_type = self.cleaned_data.get("page_type")
if page_type:
copy_target = Page.objects.filter(pk=page_type).first()
else:
copy_target = None
if copy_target:
# If the user selected a page type, copy that.
if not copy_target.has_view_permission(self.user):
raise PermissionDenied()
# Copy page attributes
copy_target._copy_attributes(page, clean=True)
page.save()
# Copy contents (for each language)
for lang in copy_target.get_languages():
copy_target._copy_contents(page, lang)
# Copy extensions
from cms.extensions import extension_pool
extension_pool.copy_extensions(copy_target, page)
else:
# If the user provided content, then use that instead.
content = self.cleaned_data.get('content')
plugin_type = get_cms_setting('PAGE_WIZARD_CONTENT_PLUGIN')
plugin_body = get_cms_setting('PAGE_WIZARD_CONTENT_PLUGIN_BODY')
slot = get_cms_setting('PAGE_WIZARD_CONTENT_PLACEHOLDER')
if plugin_type in plugin_pool.plugins and plugin_body:
if content and permissions.has_plugin_permission(
self.user, plugin_type, "add"):
placeholder = self.get_placeholder(page, slot=slot)
if placeholder:
opts = {
'placeholder': placeholder,
'plugin_type': plugin_type,
'language': self.language_code,
plugin_body: content,
}
add_plugin(**opts)
# is it the first page? publish it right away
if not self.page and Page.objects.filter(site_id=page.site_id).count() == 1:
page.publish(self.language_code)
Page.set_homepage(page, user=self.user)
return page
class CreateCMSSubPageForm(CreateCMSPageForm):
sub_page = forms.BooleanField(initial=True, widget=forms.HiddenInput)
| 35.376923 | 84 | 0.602087 | [
"BSD-3-Clause"
] | rspeed/django-cms-contrib | cms/forms/wizards.py | 9,200 | Python |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class CfnInitScenarioTest(manager.OrchestrationScenarioTest):
def setUp(self):
super(CfnInitScenarioTest, self).setUp()
if not CONF.orchestration.image_ref:
raise self.skipException("No image available to test")
self.client = self.orchestration_client
self.template_name = 'cfn_init_signal.yaml'
def assign_keypair(self):
self.stack_name = self._stack_rand_name()
if CONF.orchestration.keypair_name:
self.keypair = None
self.keypair_name = CONF.orchestration.keypair_name
else:
self.keypair = self.create_keypair()
self.keypair_name = self.keypair['name']
def launch_stack(self):
net = self._get_default_network()
self.parameters = {
'key_name': self.keypair_name,
'flavor': CONF.orchestration.instance_type,
'image': CONF.orchestration.image_ref,
'timeout': CONF.orchestration.build_timeout,
'network': net['id'],
}
# create the stack
self.template = self._load_template(__file__, self.template_name)
_, stack = self.client.create_stack(
name=self.stack_name,
template=self.template,
parameters=self.parameters)
stack = stack['stack']
_, self.stack = self.client.get_stack(stack['id'])
self.stack_identifier = '%s/%s' % (self.stack_name, self.stack['id'])
self.addCleanup(self.delete_wrapper,
self.orchestration_client.delete_stack,
self.stack_identifier)
def check_stack(self):
sid = self.stack_identifier
self.client.wait_for_resource_status(
sid, 'WaitHandle', 'CREATE_COMPLETE')
self.client.wait_for_resource_status(
sid, 'SmokeSecurityGroup', 'CREATE_COMPLETE')
self.client.wait_for_resource_status(
sid, 'SmokeKeys', 'CREATE_COMPLETE')
self.client.wait_for_resource_status(
sid, 'CfnUser', 'CREATE_COMPLETE')
self.client.wait_for_resource_status(
sid, 'SmokeServer', 'CREATE_COMPLETE')
_, server_resource = self.client.get_resource(sid, 'SmokeServer')
server_id = server_resource['physical_resource_id']
_, server = self.servers_client.get_server(server_id)
server_ip =\
server['addresses'][CONF.compute.network_for_ssh][0]['addr']
if not self.ping_ip_address(
server_ip, ping_timeout=CONF.orchestration.build_timeout):
self._log_console_output(servers=[server])
self.fail(
"(CfnInitScenarioTest:test_server_cfn_init) Timed out waiting "
"for %s to become reachable" % server_ip)
try:
self.client.wait_for_resource_status(
sid, 'WaitCondition', 'CREATE_COMPLETE')
except (exceptions.StackResourceBuildErrorException,
exceptions.TimeoutException) as e:
raise e
finally:
# attempt to log the server console regardless of WaitCondition
# going to complete. This allows successful and failed cloud-init
# logs to be compared
self._log_console_output(servers=[server])
self.client.wait_for_stack_status(sid, 'CREATE_COMPLETE')
_, stack = self.client.get_stack(sid)
# This is an assert of great significance, as it means the following
# has happened:
# - cfn-init read the provided metadata and wrote out a file
# - a user was created and credentials written to the server
# - a cfn-signal was built which was signed with provided credentials
# - the wait condition was fulfilled and the stack has changed state
wait_status = json.loads(
self._stack_output(stack, 'WaitConditionStatus'))
self.assertEqual('smoke test complete', wait_status['smoke_status'])
if self.keypair:
# Check that the user can authenticate with the generated
# keypair
try:
linux_client = self.get_remote_client(
server_ip, username='ec2-user')
linux_client.validate_authentication()
except (exceptions.ServerUnreachable,
exceptions.SSHTimeout) as e:
self._log_console_output(servers=[server])
raise e
@test.attr(type='slow')
@test.services('orchestration', 'compute')
def test_server_cfn_init(self):
self.assign_keypair()
self.launch_stack()
self.check_stack()
| 39.883212 | 79 | 0.645864 | [
"Apache-2.0"
] | ePlusPS/tempest | tempest/scenario/orchestration/test_server_cfn_init.py | 5,464 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/client/dialogs/ssh_host_key.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import base64
import binascii
import hashlib
import logging
import os
from king_phisher import its
from king_phisher import errors
from king_phisher.client import gui_utilities
from gi.repository import GLib
from gi.repository import Gtk
from gi.repository import Pango
import paramiko
import paramiko.hostkeys
__all__ = ('HostKeyAcceptDialog', 'HostKeyWarnDialog')
class BaseHostKeyDialog(gui_utilities.GladeGObject):
"""
A base class for dialogs which show information about SSH host keys. It is
	assumed that the widgets defined in :py:attr:`.dependencies` are present,
	including one button to accept the host key and one to reject it. The class's
default response can be set using :py:attr:`.default_response`.
"""
dependencies = gui_utilities.GladeDependencies(
children=(
'button_accept',
'button_reject',
'textview_key_details'
),
top_level=(
'StockApplyImage',
'StockStopImage'
)
)
top_gobject = 'dialog'
default_response = None
"""The response that should be selected as the default for the dialog."""
def __init__(self, application, hostname, key):
"""
:param application: The application to associate this popup dialog with.
:type application: :py:class:`.KingPhisherClientApplication`
:param str hostname: The hostname associated with the key.
:param key: The host's SSH key.
:type key: :py:class:`paramiko.pkey.PKey`
"""
super(BaseHostKeyDialog, self).__init__(application)
self.hostname = hostname
self.key = key
textview = self.gobjects['textview_key_details']
textview.modify_font(Pango.FontDescription('monospace 9'))
textview.get_buffer().set_text(self.key_details)
if self.default_response is not None:
button = self.dialog.get_widget_for_response(response_id=self.default_response)
button.grab_default()
@property
def key_details(self):
key_type = self.key.get_name().lower()
details = "Host: {0} ({1})\n".format(self.hostname, key_type)
if key_type.startswith('ssh-'):
key_type = key_type[4:]
key_type = key_type.split('-', 1)[0].upper()
details += "{0} key fingerprint is SHA256:{1}.\n".format(key_type, base64.b64encode(hashlib.new('sha256', self.key.asbytes()).digest()).decode('utf-8'))
details += "{0} key fingerprint is MD5:{1}.\n".format(key_type, binascii.b2a_hex(hashlib.new('md5', self.key.asbytes()).digest()).decode('utf-8'))
return details
def interact(self):
self.dialog.show_all()
response = self.dialog.run()
self.dialog.destroy()
return response
class HostKeyAcceptDialog(BaseHostKeyDialog):
"""
A dialog that shows an SSH host key for a host that has not previously had
one associated with it.
"""
	default_response = Gtk.ResponseType.ACCEPT
class HostKeyWarnDialog(BaseHostKeyDialog):
"""
A dialog that warns about an SSH host key that does not match the one that
was previously stored for the host.
"""
	default_response = Gtk.ResponseType.REJECT
def signal_checkbutton_toggled(self, button):
self.gobjects['button_accept'].set_sensitive(button.get_property('active'))
class MissingHostKeyPolicy(paramiko.MissingHostKeyPolicy):
"""
A host key policy for use with paramiko that will validate SSH host keys
	correctly. If a key is new, the user is prompted with a
	:py:class:`.HostKeyAcceptDialog` dialog to accept it; if the host key does
	not match the stored value, the user is warned with a
	:py:class:`.HostKeyWarnDialog`. The
host keys accepted through this policy are stored in an OpenSSH compatible
"known_hosts" file using paramiko.
"""
def __init__(self, application):
"""
:param application: The application which is using this policy.
:type application: :py:class:`.KingPhisherClientApplication`
"""
self.application = application
self.logger = logging.getLogger('KingPhisher.Client.' + self.__class__.__name__)
super(MissingHostKeyPolicy, self).__init__()
def missing_host_key(self, client, hostname, key):
host_key_fingerprint = 'sha256:' + base64.b64encode(hashlib.new('sha256', key.asbytes()).digest()).decode('utf-8')
host_keys = paramiko.hostkeys.HostKeys()
host_keys_modified = False
known_hosts_file = self.application.config.get('ssh_known_hosts_file', os.path.join(GLib.get_user_config_dir(), 'king-phisher', 'known_hosts'))
if os.access(known_hosts_file, os.R_OK):
host_keys.load(known_hosts_file)
if host_keys.lookup(hostname):
if host_keys.check(hostname, key):
self.logger.debug("accepting known ssh host key {0} {1} {2}".format(hostname, key.get_name(), host_key_fingerprint))
return
self.logger.warning("ssh host key does not match known value for {0}".format(hostname))
dialog = HostKeyWarnDialog(self.application, hostname, key)
if dialog.interact() != Gtk.ResponseType.ACCEPT:
raise errors.KingPhisherAbortError('bad ssh host key for ' + hostname)
else:
dialog = HostKeyAcceptDialog(self.application, hostname, key)
if dialog.interact() != Gtk.ResponseType.ACCEPT:
raise errors.KingPhisherAbortError('unknown ssh host key not accepted by the user for ' + hostname)
host_keys.add(hostname, key.get_name(), key)
host_keys_modified = True
if host_keys_modified:
try:
host_keys.save(known_hosts_file)
os.chmod(known_hosts_file, 0o600)
except IOError if its.py_v2 else PermissionError:
self.logger.warning('failed to save the known_hosts file and set its permissions')
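# Example usage (a sketch; ``app`` is assumed to be the running
# KingPhisherClientApplication instance):
#   ssh = paramiko.SSHClient()
#   ssh.set_missing_host_key_policy(MissingHostKeyPolicy(app))
#   ssh.connect(hostname, port=22, username=username)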
| 40.526316 | 154 | 0.755411 | [
"BSD-3-Clause"
] | tanc7/king-phisher | king_phisher/client/dialogs/ssh_host_key.py | 6,930 | Python |
"""
generate_data.py
Core script for generating training/test bubblesort data. First, generates random arrays,
then steps through an execution trace, computing the exact order of subroutines that need to be
called.
"""
import pickle
import numpy as np
from tasks.bubblesort.env.trace import Trace
def generate_bubblesort(prefix, num_examples, debug=False, maximum=10000000000, debug_every=1000):
"""
    Generates bubblesort data with the given string prefix (i.e. 'train', 'test') and the specified
number of examples.
:param prefix: String prefix for saving the file ('train', 'test')
:param num_examples: Number of examples to generate.
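    Example (a sketch): ``generate_bubblesort('train', 1000, debug=True)`` writes
    tasks/bubblesort/data/train.pik relative to the current working directory.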
"""
data = []
for i in range(num_examples):
array = np.random.randint(10, size=5)
if debug and i % debug_every == 0:
traces = Trace(array, True).traces
else:
traces = Trace(array).traces
data.append((array, traces))
# print(data)
with open('tasks/bubblesort/data/{}.pik'.format(prefix), 'wb') as f:
        pickle.dump(data, f)
| 31.529412 | 98 | 0.679104 | [
"BSD-3-Clause"
] | ford-core-ai/neural-programming-architectures | tasks/bubblesort/env/generate_data.py | 1,072 | Python |
# Format of required info for batch runs.
debug = 0
AMQPdebug = 0
debugTimer = 0
simNotes = """
Step of load up 10%, Reff Enabled
"""
# Simulation Parameters Dictionary
simParams = {
'timeStep': 1.0,
'endTime': 60.0,
'slackTol': 1,
'PY3msgGroup' : 3,
'IPYmsgGroup' : 60,
'Hinput' : 0.0, # MW*sec of entire system, if !> 0.0, will be calculated in code
'Dsys' : 0.0, # Untested
'fBase' : 60.0, # System F base in Hertz
'freqEffects' : True, # w in swing equation will not be assumed 1 if true
# Mathematical Options
'integrationMethod' : 'rk45',
# Data Export Parameters
'fileDirectory' : "\\delme\\sixMachineStep\\", # relative path from cwd
'fileName' : 'SixMachineStep4',
'exportFinalMirror': 1, # Export mirror with all data
'exportMat': 1, # if IPY: requies exportDict == 1 to work
'exportDict' : 0, # when using python 3 no need to export dicts.
'deleteInit' : 0, # Delete initialized mirror
}
savPath = r"C:\LTD\pslf_systems\sixMachine\sixMachine.sav"
dydPath = [r"C:\LTD\pslf_systems\sixMachine\sixMachine.dyd"]
ltdPath = r".\testCases\sixMachine\sixMachineStep.ltd.py" | 34 | 84 | 0.66263 | [
"MIT"
] | thadhaines/PSLTDSim | testCases/sixMachine/sixMachineStep4.py | 1,156 | Python |
""" GIGATL experiment specifications
dimpart contains
- "netcdfdimnames": how to map netCDF to CROCO dimensions
- "domainpartition": which CROCO dimensions are tiled
"""
import os
import sys
import glob
from shapely.geometry.polygon import Polygon
import pickle
from pretty import BB
import pretty
import tempfile
try:
from mpi4py import MPI
is_mpi = True
except:
print("MPI not found, no problem")
is_mpi = False
if is_mpi:
comm = MPI.COMM_WORLD
rank = MPI.COMM_WORLD.Get_rank()
else:
rank = 0
def barrier():
if is_mpi:
MPI.COMM_WORLD.Barrier()
else:
pass
def abort():
if is_mpi:
MPI.COMM_WORLD.Abort()
else:
sys.exit()
# path to where giga_tools.py sits
dirmodule = os.path.dirname(pretty.__file__)
sep = os.path.sep
# path to pickle GIGATL data files
dirdata = sep.join(dirmodule.split(sep)[:-1] + ["data"])
subdomains = range(1, 14)
partition = (100, 100)
nsigma = 100
dimpart = {"netcdfdimnames":
{"sigma": ("sigma_rho", "sigma_w"),
"eta": ("eta_rho", "eta_v"),
"xi": ("xi_rho", "xi_u")},
"domainpartition": ("eta", "xi")}
domain = os.popen("hostname -d").read()
if "tgcc" in domain:
hostname = "irene"
elif "univ-brest" in domain:
hostname = os.popen("hostname").read()[:-1]
else:
raise ValueError("Could not find the Gigatl data")
if rank == 0:
print(f"hostname: {hostname}")
print(f"paragridded is located in : {dirmodule}")
print(f"data are located in : {dirdata}")
barrier()
def create_hisdir():
subd = 1
dirname = dirhis.format(subd=subd)
parent = os.path.abspath(dirname+"/..")
content = glob.glob(parent+"/??")
ok = True
for subd in subdomains:
dirname = dirhis.format(subd=subd)
if dirname in content:
if os.path.isdir(dirname):
command = f"fusermount -u {dirname}"
try:
os.system(command)
except:
pass
else:
ok = False
else:
os.makedirs(dirname)
if not ok:
print(f"{parent} needs to be cleaned")
try:
os.rename(f"{parent}", f"{dirtrash}/")
except:
abort()
for subd in subdomains:
dirname = dirhis.format(subd=subd)
os.makedirs(dirname)
print(f"{parent} has been cleaned", flush=True)
else:
print(f"{parent} is sound")
def create_directory(dirname, attempt=0):
if not os.path.isdir(dirname):
parent = os.path.abspath(dirname+"/..")
content = glob.glob(parent+"/*")
if not dirname in content:
os.makedirs(dirname)
if attempt == 2:
print(f"{dirname}: moved to trash")
else:
if attempt == 1:
print("{dirname}: fusermount didn't work")
command = f"mv -f {parent} {dirtrash}"
os.system(command)
create_directory(dirname, attempt=2)
if attempt == 2:
print(f"{dirname}: really serious problem")
abort()
else:
print(f"{dirname}: problem with the fuse system")
#print("*** try to fix it with fuserumount")
command = f"fusermount -u {dirname}"
os.system(command)
create_directory(dirname, attempt=1)
if hostname == "irene":
dirgridtar = "/ccc/store/cont003/gch0401/gch0401/GIGATL1_1h_tides/GRD/{subd:02}"
dirgigaref = "/ccc/store/cont003/gch0401/gch0401/GIGATL1_1h_tides/HIS_1h/{subd:02}"
dirgiga = "/ccc/store/cont003/gch0401/groullet/GIGATL1_1h_tides/HIS_1h/{subd:02}"
# fix_filename_on_store
# dirsurf = "/ccc/store/cont003/gch0401/gch0401/GIGATL1_1h_tides/SURF"
dirmounted_root = "/ccc/work/cont003/gch0401/groullet/gigatl"
dirmounted = f"{dirmounted_root}/R_{rank:04}"
dirgrid = "/ccc/scratch/cont003/gen12051/gulaj/GIGATL1/GRD3"
dirhis = dirmounted+"/HIS/{subd:02}"
#dirtrash = f"/ccc/scratch/cont003/gen12051/groullet/trash"
# for d in [dirhis]:
# for subd in subdomains:
# dirname = d.format(subd=subd)
# create_directory(dirname)
# or use directly
# dirgrid = "/ccc/scratch/cont003/ra4735/gulaj/GIGATL1/INIT_N100_100_100/GRD3"
# hisindex = [0, 6, 12, 18] | [24, 30, 36, 42] | [48, 54, 60, 66]
# | [72, 78, 84, 90] | [96, 102, 108, 114]
# e.g. "2008-09-19" contains [96, 102, 108, 114]
hisindex = 36
# hisdate = "2008-03-14" .. "2008-11-18" (included)
# 250 days as of Nov 17th 2020
hisdate = "2008-09-26"
targridtemplate = "gigatl1_grd_masked.{subd:02}.tar"
tarhistemplate = "gigatl1_his_1h.{hisdate}.{subd:02}.tar"
for subd in subdomains:
assert os.path.isdir(dirgigaref.format(subd=subd))
else:
dirgrid = "/net/omega/local/tmp/1/gula/GIGATL1/GIGATL1_1h_tides/GRD"
dirsurf = "/net/omega/local/tmp/1/gula/GIGATL1/GIGATL1_1h_tides/SURF/gigatl1_surf.2008-05-23"
dirgigaref = "/net/omega/local/tmp/1/gula/GIGATL1/GIGATL1_1h_tides/HIS_1h"
dirgiga = "/net/omega/local/tmp/1/gula/GIGATL1/GIGATL1_1h_tides/HIS_1h"
dirmounted_root = "/net/omega/local/tmp/1/roullet/gigatl"
dirmounted = f"{dirmounted_root}/R_{rank:04}"
dirhis = dirmounted+"/HIS/{subd:02}"
#dirtrash = "/net/omega/local/tmp/1/roullet/trash"
hisindex = 72
hisdate = "2008-09-23"
tarhistemplate = "gigatl1_his_1h.{hisdate}.{subd:02}.tar"
# for d in [dirhis]:
# for subd in subdomains:
# dirname = d.format(subd=subd)
# create_directory(dirname)
hour = 14
sqlitesdir = f"{dirmounted_root}/sqlites"
def check():
""" check that all paths are properly defined"""
checked = True
print(f" - history tar files will be mounted on: {dirmounted_root}")
print(f" - ratarmount executable is in : {ratarmount}")
def setup_directories():
if rank == 0:
if not os.path.isdir(sqlitesdir):
os.makedirs(sqlitesdir)
# if not os.path.isdir(dirtrash):
# os.makedirs(dirtrash)
# else:
# command = f"rm -Rf {dirtrash}/*"
# os.system(command)
if not os.path.isdir(dirmounted):
os.makedirs(dirmounted)
barrier()
create_hisdir()
def get_subd(tile):
if tile in subdmap:
return subdmap[tile]
else:
return -1
def grdfiles(tile):
#print(f" read grid {tile}")
subd = get_subd(tile)
directory = dirgrid.format(subd=subd)
filename = f"{directory}/gigatl1_grd_masked.{tile:04}.nc"
if not os.path.isfile(filename):
mount(subd, grid=True)
return filename
def surffiles(tile):
return f"{dirsurf}/gigatl1_surf.{tile:04}.nc"
def hisfiles(tile):
subd = get_subd(tile)
if subd > 0:
directory = dirhis.format(subd=subd)
files = sorted(glob.glob(f"{directory}/gigatl1_his.*.{tile:04}.nc"))
_dateindex = [int(f.split(".")[-3]) for f in files]
_hisindex = _dateindex[int(hour)//6]
filename = f"{directory}/gigatl1_his.{_hisindex:06}.{tile:04}.nc"
if not os.path.isfile(filename):
mount(subd)
return filename
else:
return ""
def get_subdmap(directory):
"""Reconstruct how netCDF files are stored in fused directory
directory == dirgrid | dirhis """
_subdmap = {}
for subd in subdomains:
fs = glob.glob(directory.format(subd=subd)+"/*.nc")
tiles = [int(f.split(".")[-2]) for f in fs]
for t in tiles:
_subdmap[t] = subd
return _subdmap
def set_ratarmount():
mount = "ratarmount"
options = "" # "-c -gs 160000"
ratarmount = os.popen(f"which {mount}").read()
if len(ratarmount) > 0:
# remove the trailing "\n"
ratarmount = ratarmount[:-1]
print(f"found ratarmount in : {ratarmount}")
else:
if rank == 0:
print("")
print(BB("warning").center(20, "*"))
print(f"{mount} is not installed or cannot be found")
print("you can set it manually with")
print("giga.ratarmount = /path/to/bin/ratarmount")
return ratarmount
def mount_tar(source, tarfile, destdir):
"""
source: str, directory of the tar files
template: str, template name for the tar file containing "{subd"
subd: int, index of the subdomain (0<=subd<=13)
destdir: str, directory where to archivemount
"""
srcfile = f"{source}/{tarfile}"
#print(f"mount {srcfile} on {destdir}")
assert os.path.isfile(srcfile), f"{srcfile} does not exsit"
sqlitefile = get_sqlitefilename(srcfile)
home = os.path.expanduser("~")
ratardirsqlite = f"{home}/.ratarmount"
if os.path.isfile(f"{ratardirsqlite}/{sqlitefile}"):
# nothing to do
pass
else:
if os.path.isfile(f"{sqlitesdir}/{sqlitefile}"):
command = f"cp {sqlitesdir}/{sqlitefile} {ratardirsqlite}/"
os.system(command)
assert len(ratarmount) > 0, BB("You forgot to set the ratarmount path")
command = f"{ratarmount} {srcfile} {destdir}"
os.system(command)
if os.path.isfile(f"{sqlitesdir}/{sqlitefile}"):
# nothing to do
pass
else:
command = f"cp {ratardirsqlite}/{sqlitefile} {sqlitesdir}/"
os.system(command)
# delete sqlitefile on ratardirsqlite
# os.remove(f"{ratardirsqlite}/{sqlitefile}")
def mount(subd, grid=False, overwrite=True):
"""Mount tar file `subd`"""
if grid:
destdir = dirgrid.format(subd=subd)
srcdir = dirgridtar.format(subd=subd)
tarfile = targridtemplate.format(subd=subd)
else:
destdir = dirhis.format(subd=subd)
srcdir = dirgigaref.format(subd=subd)
tarfile = tarhistemplate.format(hisdate=hisdate, subd=subd)
tomount = True
if os.path.exists(destdir):
#print(f"{destdir} already exists")
if len(os.listdir(destdir)) == 0:
# folder is empty
pass
elif overwrite:
# folder is not empty but we want to overwrite it
# first let's unmount it
command = f"fusermount -u {destdir}"
try:
os.system(command)
except:
pass
#
assert len(os.listdir(f"{destdir}")) == 0
else:
tomount = False
else:
print(f"*** makedir {destdir}")
# os.makedirs(destdir)
if tomount:
mount_tar(srcdir, tarfile, destdir)
if not(grid):
write_toc(destdir, subd, hisdate)
def write_toc(destdir, subd, _hisdate):
with open(f"{destdir}/../hisdate_{subd:02}.txt", mode="w") as fid:
fid.write(_hisdate)
def read_toc(destdir, subd):
with open(f"{destdir}/../hisdate_{subd:02}.txt", mode="r") as fid:
return fid.read()
def mount_all(grid=False):
for subd in subdomains:
mount(subd, grid=grid)
def mount_stats(grid=False):
""" Print statistics on mounted tar files"""
print("-"*40)
print(BB("statistics on mounted tar files"))
print(f"mounting point: {dirmounted}")
for subd in subdomains:
if grid:
destdir = dirgrid.format(subd=subd)
else:
destdir = dirhis.format(subd=subd)
if os.path.exists(destdir):
filelist = os.listdir(f"{destdir}")
nbfiles = len(filelist)
if nbfiles > 0:
tiles = set([int(f.split(".")[-2]) for f in filelist])
nbtiles = len(tiles)
tile = list(tiles)[0]
fs = [f for f in filelist if f"{tile:04}.nc" in f]
if grid:
msg = f" - {subd:02} : {nbtiles:03} tiles"
else:
_hisdate = read_toc(destdir, subd)
# dateindex = sorted([int(f.split(".")[-3]) for f in fs])
# msg = f" - {subd:02} : {nbtiles:03} tiles x {dateindex} dateindex"
bbhisdate = BB(_hisdate)
msg = f" - {subd:02} : {bbhisdate} with {nbtiles:03} tiles"
else:
msg = f" - {subd:02} : empty"
else:
warning = BB("destroyed")
msg = f" - {subd:02} : {warning}"
print(msg)
def umount_all(grid=False):
for subd in subdomains:
umount(subd, grid=grid)
def umount(subd, grid=False):
""" Unmount `subd` tar archive folder
The command to unmount a fuse folder is fusermount -u"""
if grid:
destdir = dirgrid.format(subd=subd)
else:
destdir = dirhis.format(subd=subd)
if os.path.isdir(destdir) and len(os.listdir(f"{destdir}")) != 0:
command = f"fusermount -u {destdir}"
os.system(command)
#command = f"rmdir {destdir}"
# os.system(command)
else:
pass
#print(f"{destdir} is already umounted")
def get_sqlitefilename(tarfile):
sqlitefile = "_".join(tarfile.split("/"))+".index.sqlite"
return sqlitefile
def LLTP2domain(lowerleft, topright):
"""Convert the two pairs of (lower, left), (top, right) in (lat, lon)
into the four pairs of (lat, lon) of the corners """
xa, ya = lowerleft
xb, yb = topright
domain = [(xa, ya), (xa, yb), (xb, yb), (xb, ya)]
return domain
def find_tiles_inside(domain, corners):
"""Determine which tiles are inside `domain`
The function uses `corners` the list of corners for each tile
"""
p = Polygon(domain)
tileslist = []
for tile, c in corners.items():
q = Polygon(c)
if p.overlaps(q) or p.contains(q):
tileslist += [tile]
return tileslist
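# A small sketch (not part of the original module) of how the two helpers above
# combine; the corner coordinates below are made up and stand in for the real
# `corners` dictionary loaded further down from giga_corners.pkl:
#
#     demo_corners = {1: [(-60, 10), (-60, 20), (-50, 20), (-50, 10)],
#                     2: [(0, 0), (0, 10), (10, 10), (10, 0)]}
#     domain = LLTP2domain((-62, 8), (-48, 22))
#     find_tiles_inside(domain, demo_corners)  # -> [1]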
def get_dates():
"""
Scan dirgiga for *tar files
"""
subd = 1
pattern = f"{dirgigaref}/*.{subd:02}.tar".format(subd=subd)
files = glob.glob(pattern)
_dates_tar = [f.split("/")[-1].split(".")[-3] for f in files]
#print(f"------SCAN GIGATL HIS databas --------")
# print("files:")
# print(files)
# print(glob.glob(dirgiga.format(subd=1)+"/*"))
return sorted(_dates_tar)
def get_hisindexes_in_histar(hisdate):
# select one subd
subd = 1
# select one tile in this subd
tile = [t for t, s in subdmap.items() if s == subd][0]
tarfile = "/".join([dirgiga, tarhistemplate])
tarfile = tarfile.format(hisdate=hisdate, subd=subd)
files = os.popen(f"tar tvf {tarfile} *{tile:04}.nc").readlines()
hisindexes = [int(f.split(".")[-3]) for f in files]
return hisindexes
if hostname == "irene":
hisdates = get_dates()
else:
hisdates = ["2008-09-26"]
# try:
# umount_all()
# except:
# MPI.COMM_WORLD.Abort()
# corners and submap are stored in pickle files
with open(f"{dirdata}/giga_corners.pkl", "rb") as f:
corners = pickle.load(f)
assert len(corners) == 6582, "something is wrong with data/giga_corners.pkl"
with open(f"{dirdata}/giga_subdmap.pkl", "rb") as f:
subdmap = pickle.load(f)
assert len(subdmap) == 6582, "something is wrong with data/giga_subdmap.pkl"
with open(f"{dirdata}/gigaspecs.pkl", "rb") as f:
corners = pickle.load(f)
missing = pickle.load(f)
subdmap = pickle.load(f)
if False:
dirs = glob.glob(dirmounted+"/R*/HIS/??")
for d in dirs:
try:
command = f"fusermount -u {d}"
os.system(command)
except:
command = f"rm -Rf {d}"
os.system(command)
ratarmount = set_ratarmount()
barrier()
| 27.956989 | 97 | 0.587821 | [
"MIT"
] | Mesharou/paragridded | paragridded/giga_tools.py | 15,600 | Python |
"""Helper methods to handle the time in Home Assistant."""
import datetime as dt
import re
from typing import Any, Dict, List, Optional, Tuple, Union, cast
import pytz
import pytz.exceptions as pytzexceptions
import pytz.tzinfo as pytzinfo
from homeassistant.const import MATCH_ALL
DATE_STR_FORMAT = "%Y-%m-%d"
UTC = pytz.utc
DEFAULT_TIME_ZONE: dt.tzinfo = pytz.utc
# Copyright (c) Django Software Foundation and individual contributors.
# All rights reserved.
# https://github.com/django/django/blob/master/LICENSE
DATETIME_RE = re.compile(
r"(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})"
r"[T ](?P<hour>\d{1,2}):(?P<minute>\d{1,2})"
r"(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?"
r"(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$"
)
def set_default_time_zone(time_zone: dt.tzinfo) -> None:
"""Set a default time zone to be used when none is specified.
Async friendly.
"""
global DEFAULT_TIME_ZONE
# NOTE: Remove in the future in favour of typing
assert isinstance(time_zone, dt.tzinfo)
DEFAULT_TIME_ZONE = time_zone
def get_time_zone(time_zone_str: str) -> Optional[dt.tzinfo]:
"""Get time zone from string. Return None if unable to determine.
Async friendly.
"""
try:
return pytz.timezone(time_zone_str)
except pytzexceptions.UnknownTimeZoneError:
return None
def utcnow() -> dt.datetime:
"""Get now in UTC time."""
return dt.datetime.now(UTC)
def now(time_zone: Optional[dt.tzinfo] = None) -> dt.datetime:
"""Get now in specified time zone."""
return dt.datetime.now(time_zone or DEFAULT_TIME_ZONE)
def as_utc(dattim: dt.datetime) -> dt.datetime:
"""Return a datetime as UTC time.
Assumes datetime without tzinfo to be in the DEFAULT_TIME_ZONE.
"""
if dattim.tzinfo == UTC:
return dattim
if dattim.tzinfo is None:
dattim = DEFAULT_TIME_ZONE.localize(dattim) # type: ignore
return dattim.astimezone(UTC)
def as_timestamp(dt_value: dt.datetime) -> float:
"""Convert a date/time into a unix time (seconds since 1970)."""
if hasattr(dt_value, "timestamp"):
parsed_dt: Optional[dt.datetime] = dt_value
else:
parsed_dt = parse_datetime(str(dt_value))
if parsed_dt is None:
raise ValueError("not a valid date/time.")
return parsed_dt.timestamp()
def as_local(dattim: dt.datetime) -> dt.datetime:
"""Convert a UTC datetime object to local time zone."""
if dattim.tzinfo == DEFAULT_TIME_ZONE:
return dattim
if dattim.tzinfo is None:
dattim = UTC.localize(dattim)
return dattim.astimezone(DEFAULT_TIME_ZONE)
def utc_from_timestamp(timestamp: float) -> dt.datetime:
"""Return a UTC time from a timestamp."""
return UTC.localize(dt.datetime.utcfromtimestamp(timestamp))
def start_of_local_day(
dt_or_d: Union[dt.date, dt.datetime, None] = None
) -> dt.datetime:
"""Return local datetime object of start of day from date or datetime."""
if dt_or_d is None:
date: dt.date = now().date()
    elif isinstance(dt_or_d, dt.datetime):
        date = dt_or_d.date()
    else:
        date = dt_or_d  # a plain dt.date was passed in
return DEFAULT_TIME_ZONE.localize( # type: ignore
dt.datetime.combine(date, dt.time())
)
# Copyright (c) Django Software Foundation and individual contributors.
# All rights reserved.
# https://github.com/django/django/blob/master/LICENSE
def parse_datetime(dt_str: str) -> Optional[dt.datetime]:
"""Parse a string and return a datetime.datetime.
This function supports time zone offsets. When the input contains one,
the output uses a timezone with a fixed offset from UTC.
Raises ValueError if the input is well formatted but not a valid datetime.
Returns None if the input isn't well formatted.
"""
match = DATETIME_RE.match(dt_str)
if not match:
return None
kws: Dict[str, Any] = match.groupdict()
if kws["microsecond"]:
kws["microsecond"] = kws["microsecond"].ljust(6, "0")
tzinfo_str = kws.pop("tzinfo")
tzinfo: Optional[dt.tzinfo] = None
if tzinfo_str == "Z":
tzinfo = UTC
elif tzinfo_str is not None:
offset_mins = int(tzinfo_str[-2:]) if len(tzinfo_str) > 3 else 0
offset_hours = int(tzinfo_str[1:3])
offset = dt.timedelta(hours=offset_hours, minutes=offset_mins)
if tzinfo_str[0] == "-":
offset = -offset
tzinfo = dt.timezone(offset)
kws = {k: int(v) for k, v in kws.items() if v is not None}
kws["tzinfo"] = tzinfo
return dt.datetime(**kws)
def parse_date(dt_str: str) -> Optional[dt.date]:
"""Convert a date string to a date object."""
try:
return dt.datetime.strptime(dt_str, DATE_STR_FORMAT).date()
except ValueError: # If dt_str did not match our format
return None
def parse_time(time_str: str) -> Optional[dt.time]:
"""Parse a time string (00:20:00) into Time object.
Return None if invalid.
"""
parts = str(time_str).split(":")
if len(parts) < 2:
return None
try:
hour = int(parts[0])
minute = int(parts[1])
second = int(parts[2]) if len(parts) > 2 else 0
return dt.time(hour, minute, second)
except ValueError:
# ValueError if value cannot be converted to an int or not in range
return None
# Found in this gist: https://gist.github.com/zhangsen/1199964
def get_age(date: dt.datetime) -> str:
"""
Take a datetime and return its "age" as a string.
The age can be in second, minute, hour, day, month or year. Only the
biggest unit is considered, e.g. if it's 2 days and 3 hours, "2 days" will
be returned.
Make sure date is not in the future, or else it won't work.
"""
def formatn(number: int, unit: str) -> str:
"""Add "unit" if it's plural."""
if number == 1:
return f"1 {unit}"
return f"{number:d} {unit}s"
def q_n_r(first: int, second: int) -> Tuple[int, int]:
"""Return quotient and remaining."""
return first // second, first % second
delta = now() - date
day = delta.days
second = delta.seconds
year, day = q_n_r(day, 365)
if year > 0:
return formatn(year, "year")
month, day = q_n_r(day, 30)
if month > 0:
return formatn(month, "month")
if day > 0:
return formatn(day, "day")
hour, second = q_n_r(second, 3600)
if hour > 0:
return formatn(hour, "hour")
minute, second = q_n_r(second, 60)
if minute > 0:
return formatn(minute, "minute")
return formatn(second, "second")
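# An illustrative call (not part of Home Assistant): only the largest unit is
# reported, so a timestamp 2 days and 3 hours in the past comes back as "2 days":
#
#     get_age(now() - dt.timedelta(days=2, hours=3))  # -> "2 days"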
def parse_time_expression(parameter: Any, min_value: int, max_value: int) -> List[int]:
"""Parse the time expression part and return a list of times to match."""
if parameter is None or parameter == MATCH_ALL:
res = list(range(min_value, max_value + 1))
elif isinstance(parameter, str) and parameter.startswith("/"):
parameter = int(parameter[1:])
res = [x for x in range(min_value, max_value + 1) if x % parameter == 0]
elif not hasattr(parameter, "__iter__"):
res = [int(parameter)]
else:
res = list(sorted(int(x) for x in parameter))
for val in res:
if val < min_value or val > max_value:
raise ValueError(
"Time expression '{}': parameter {} out of range ({} to {})"
"".format(parameter, val, min_value, max_value)
)
return res
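# A few illustrative calls (not part of Home Assistant) showing the three
# accepted forms: a step expression, an explicit list, and the match-all value:
#
#     parse_time_expression("/15", 0, 59)   # -> [0, 15, 30, 45]
#     parse_time_expression([5, 1], 0, 59)  # -> [1, 5]
#     parse_time_expression(None, 0, 23)    # -> [0, 1, ..., 23]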
# pylint: disable=redefined-outer-name
def find_next_time_expression_time(
now: dt.datetime, seconds: List[int], minutes: List[int], hours: List[int]
) -> dt.datetime:
"""Find the next datetime from now for which the time expression matches.
The algorithm looks at each time unit separately and tries to find the
next one that matches for each. If any of them would roll over, all
time units below that are reset to the first matching value.
Timezones are also handled (the tzinfo of the now object is used),
including daylight saving time.
"""
if not seconds or not minutes or not hours:
raise ValueError("Cannot find a next time: Time expression never " "matches!")
def _lower_bound(arr: List[int], cmp: int) -> Optional[int]:
"""Return the first value in arr greater or equal to cmp.
Return None if no such value exists.
"""
left = 0
right = len(arr)
while left < right:
mid = (left + right) // 2
if arr[mid] < cmp:
left = mid + 1
else:
right = mid
if left == len(arr):
return None
return arr[left]
result = now.replace(microsecond=0)
# Match next second
next_second = _lower_bound(seconds, result.second)
if next_second is None:
# No second to match in this minute. Roll-over to next minute.
next_second = seconds[0]
result += dt.timedelta(minutes=1)
result = result.replace(second=next_second)
# Match next minute
next_minute = _lower_bound(minutes, result.minute)
if next_minute != result.minute:
# We're in the next minute. Seconds needs to be reset.
result = result.replace(second=seconds[0])
if next_minute is None:
# No minute to match in this hour. Roll-over to next hour.
next_minute = minutes[0]
result += dt.timedelta(hours=1)
result = result.replace(minute=next_minute)
# Match next hour
next_hour = _lower_bound(hours, result.hour)
if next_hour != result.hour:
# We're in the next hour. Seconds+minutes needs to be reset.
result = result.replace(second=seconds[0], minute=minutes[0])
if next_hour is None:
            # No hour to match in this day. Roll-over to next day.
next_hour = hours[0]
result += dt.timedelta(days=1)
result = result.replace(hour=next_hour)
if result.tzinfo is None:
return result
# Now we need to handle timezones. We will make this datetime object
# "naive" first and then re-convert it to the target timezone.
# This is so that we can call pytz's localize and handle DST changes.
tzinfo: pytzinfo.DstTzInfo = result.tzinfo
result = result.replace(tzinfo=None)
try:
result = tzinfo.localize(result, is_dst=None)
except pytzexceptions.AmbiguousTimeError:
# This happens when we're leaving daylight saving time and local
# clocks are rolled back. In this case, we want to trigger
# on both the DST and non-DST time. So when "now" is in the DST
# use the DST-on time, and if not, use the DST-off time.
use_dst = bool(now.dst())
result = tzinfo.localize(result, is_dst=use_dst)
except pytzexceptions.NonExistentTimeError:
# This happens when we're entering daylight saving time and local
# clocks are rolled forward, thus there are local times that do
# not exist. In this case, we want to trigger on the next time
# that *does* exist.
# In the worst case, this will run through all the seconds in the
# time shift, but that's max 3600 operations for once per year
result = result.replace(tzinfo=tzinfo) + dt.timedelta(seconds=1)
return find_next_time_expression_time(result, seconds, minutes, hours)
result_dst = cast(dt.timedelta, result.dst())
now_dst = cast(dt.timedelta, now.dst())
if result_dst >= now_dst:
return result
# Another edge-case when leaving DST:
# When now is in DST and ambiguous *and* the next trigger time we *should*
# trigger is ambiguous and outside DST, the excepts above won't catch it.
# For example: if triggering on 2:30 and now is 28.10.2018 2:30 (in DST)
# we should trigger next on 28.10.2018 2:30 (out of DST), but our
# algorithm above would produce 29.10.2018 2:30 (out of DST)
# Step 1: Check if now is ambiguous
try:
tzinfo.localize(now.replace(tzinfo=None), is_dst=None)
return result
except pytzexceptions.AmbiguousTimeError:
pass
# Step 2: Check if result of (now - DST) is ambiguous.
check = now - now_dst
check_result = find_next_time_expression_time(check, seconds, minutes, hours)
try:
tzinfo.localize(check_result.replace(tzinfo=None), is_dst=None)
return result
except pytzexceptions.AmbiguousTimeError:
pass
# OK, edge case does apply. We must override the DST to DST-off
check_result = tzinfo.localize(check_result.replace(tzinfo=None), is_dst=False)
return check_result
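# A small self-contained sketch (not part of Home Assistant, and assuming the
# module's own imports resolve) showing how the helpers above combine: build the
# match lists with parse_time_expression, then ask for the next local time whose
# second is 0 and whose minute is a multiple of 15.
if __name__ == "__main__":
    matching_seconds = parse_time_expression(0, 0, 59)
    matching_minutes = parse_time_expression("/15", 0, 59)
    matching_hours = parse_time_expression(MATCH_ALL, 0, 23)
    print(find_next_time_expression_time(
        now(), matching_seconds, matching_minutes, matching_hours))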
| 33.879032 | 87 | 0.64929 | [
"Apache-2.0"
] | 854562/home-assistant | homeassistant/util/dt.py | 12,603 | Python |
from cpu_element import CPU_element
from elements import Instruction_pointer
from tests.tools import set_signals
signals = ["address"]
result = "result"
def test_write_output():
source = CPU_element([], signals)
ip = Instruction_pointer(signals, [result])
assert isinstance(ip, CPU_element)
ip.connect([source])
value = 55
set_signals(source, ip, signals, [value])
assert ip.outputs[result] == value
| 25.352941 | 47 | 0.723898 | [
"MIT"
] | chagulend/MIPS-Sim | src/tests/test_instruction_pointer.py | 431 | Python |
#!/usr/bin/python3
from subprocess import Popen, PIPE
import re
import argparse
import signal
import random
import socket
from collections import defaultdict
import sys
import subprocess
try:
import picamera
except ImportError:
subprocess.call([sys.executable, "-m", "pip", "install", "picamera"])
finally:
import picamera
random.seed(None)
def createsdp(hostname, streams):
params2ignore = set(['encoding-name', 'timestamp-offset', 'payload', 'clock-rate', 'media', 'port'])
sdp = ['v=0']
sdp.append('o=- %d %d IN IP4 %s' % (random.randrange(4294967295), 2, hostname))
sdp.append('t=0 0')
sdp.append('s=GST2SDP')
streamnumber = 1
# add individual streams to SDP
for stream in streams:
sdp.append("m=%s %s RTP/AVP %s" % (stream['media'], stream['port'], stream['payload']))
sdp.append('c=IN IP4 %s' % hostname)
sdp.append("a=rtpmap:%s %s/%s" % (stream['payload'], stream['encoding-name'], stream['clock-rate']))
fmtp = ["a=fmtp:%s" % stream['payload']]
for param, value in stream.items():
# is parameter an action?
if param[0] == 'a' and param[1] == '-':
aparam = "%s:%s" % (param.replace('a-', 'a='), value)
sdp.append(aparam)
else:
if param not in params2ignore:
fmtp.append(" %s=%s;" % (param, value))
fmtp = ''.join(fmtp)
sdp.append(fmtp)
sdp.append("a=control:track%d" % streamnumber)
streamnumber += 1
# save sdp
with open('session.sdp', 'w') as f:
f.write('\r\n'.join(sdp))
def main(arguments):
gstreamer = 'gst-launch-1.0.exe' if 'win' in sys.platform else 'gst-launch-1.0'
hostname = arguments.hostname[0]
encoders = {'h264': (b'GstRtpH264Pay', 'h264parse', 'rtph264pay'),
'vp8': (b'GstRtpVP8Pay', 'vp8enc', 'rtpvp8pay'),
'openh264': (b'GstRtpH264Pay', 'openh264enc', 'rtph264pay')}
rtppay = encoders[arguments.codec][0]
# port = arguments.port
port = 5600
arglist = [gstreamer, "-v", "rpicamsrc", "bitrate=%d" % 1500000,
"!", "video/x-raw, width=1280, height=720, framerate=30/1",
"!", "omxh264enc", "!", encoders[arguments.codec][1], "!", encoders[arguments.codec][2],
"!", "multiudpsink", "clients="]
for hostname in arguments.hostname:
arglist[14] += hostname + ":5600" + ","
print(arglist)
if arguments.debug:
print("Calling gstreamer:\n", " ".join(arglist))
process = Popen(arglist, stdout=PIPE)
def signal_handler(signal, frame):
process.kill()
print('Terminating child process')
signal.signal(signal.SIGINT, signal_handler)
patternGenerated = False
try:
p = re.compile(rb'/GstPipeline:pipeline\d+/%b:\w+\d+.GstPad:src: caps = (.+)' % rtppay)
for line in process.stdout:
pattern = p.search(line)
if pattern and not patternGenerated:
parameters = re.findall(rb'(([\w-]+)=(?:\(\w+\))?(?:(\w+)|(?:"([^"]+)")))', pattern.groups()[0])
# print(parameters)
parammap = defaultdict(str)
for (_, param, value, value2) in parameters:
parammap[param.decode('ascii')] = value.decode('ascii') if value else value2.decode('ascii')
parammap['port'] = port
if len(parammap) > 0:
patternGenerated = True
if arguments.sdp:
createsdp(hostname, [parammap])
for param, value in parammap.items():
print("%s = %s" % (param, value))
finally:
process.wait()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("hostname", help="hostname or IP address of the destination", nargs='+')
parser.add_argument("--sdp", help="generates SDP file for the stream (defaults to false)", action="store_true")
parser.add_argument("--debug", help="shows command line in use to call gstreamer", action="store_true")
parser.add_argument("--port", "-p", help="port (defaults to 5000)", type=int, default=5000)
parser.add_argument("--codec", help="chooses encoder (defaults to openh264)", choices=['vp8', 'h264', 'openh264'],
default='openh264')
parser.add_argument("--camera", help="Device id (defaults to 0)", type=int, default=0)
args = parser.parse_args()
# args.hostname = socket.gethostbyname(args.hostname)
print("Using hostname %s using device %d" % (args.hostname, args.camera))
main(args)
| 38.295082 | 118 | 0.581978 | [
"MIT"
] | Ethan7102/FYP | other/gstreamcam.py | 4,672 | Python |
import os
from typing import Optional
from pytorchltr.utils.downloader import DefaultDownloadProgress
from pytorchltr.utils.downloader import Downloader
from pytorchltr.utils.file import validate_and_download
from pytorchltr.utils.file import extract_zip
from pytorchltr.utils.file import dataset_dir
from pytorchltr.datasets.svmrank.svmrank import SVMRankDataset
class MSLR10K(SVMRankDataset):
"""
Utility class for downloading and using the MSLR-WEB10K dataset:
https://www.microsoft.com/en-us/research/project/mslr/.
This dataset is a smaller sampled version of the MSLR-WEB30K dataset.
"""
downloader = Downloader(
url="https://api.onedrive.com/v1.0/shares/s!AtsMfWUz5l8nbOIoJ6Ks0bEMp78/root/content", # noqa: E501
target="MSLR-WEB10K.zip",
sha256_checksum="2902142ea33f18c59414f654212de5063033b707d5c3939556124b1120d3a0ba", # noqa: E501
progress_fn=DefaultDownloadProgress(),
postprocess_fn=extract_zip)
per_fold_expected_files = {
1: [
{"path": "Fold1/train.txt", "sha256": "6eb3fae4e1186e1242a6520f53a98abdbcde5b926dd19a28e51239284b1d55dc"}, # noqa: E501
{"path": "Fold1/test.txt", "sha256": "33fe002374a4fce58c4e12863e4eee74745d5672a26f3e4ddacc20ccfe7d6ba0"}, # noqa: E501
{"path": "Fold1/vali.txt", "sha256": "e86fb3fe7e8a5f16479da7ce04f783ae85735f17f66016786c3ffc797dd9d4db"} # noqa: E501
],
2: [
{"path": "Fold2/train.txt", "sha256": "40e4a2fcc237d9c164cbb6a3f2fa91fe6cf7d46a419d2f73e21cf090285659eb"}, # noqa: E501
{"path": "Fold2/test.txt", "sha256": "44add582ccd674cf63af24d3bf6e1074e87a678db77f00b44c37980a3010917a"}, # noqa: E501
{"path": "Fold2/vali.txt", "sha256": "33fe002374a4fce58c4e12863e4eee74745d5672a26f3e4ddacc20ccfe7d6ba0"} # noqa: E501
],
3: [
{"path": "Fold3/train.txt", "sha256": "f13005ceb8de0db76c93b02ee4b2bded6f925097d3ab7938931e8d07aa72acd7"}, # noqa: E501
{"path": "Fold3/test.txt", "sha256": "c0a5a3c6bd7790d0b4ff3d5e961d0c8c5f8ff149089ce492540fa63035801b7a"}, # noqa: E501
{"path": "Fold3/vali.txt", "sha256": "44add582ccd674cf63af24d3bf6e1074e87a678db77f00b44c37980a3010917a"} # noqa: E501
],
4: [
{"path": "Fold4/train.txt", "sha256": "6c1677cf9b2ed491e26ac6b8c8ca7dfae9c1a375e2bce8cba6df36ab67ce5836"}, # noqa: E501
{"path": "Fold4/test.txt", "sha256": "dc6083c24a5f0c03df3c91ad3eed7542694115b998acf046e51432cb7a22b848"}, # noqa: E501
{"path": "Fold4/vali.txt", "sha256": "c0a5a3c6bd7790d0b4ff3d5e961d0c8c5f8ff149089ce492540fa63035801b7a"} # noqa: E501
],
5: [
{"path": "Fold5/train.txt", "sha256": "4249797a2f0f46bff279973f0fb055d4a78f67f337769eabd56e82332c044794"}, # noqa: E501
{"path": "Fold5/test.txt", "sha256": "e86fb3fe7e8a5f16479da7ce04f783ae85735f17f66016786c3ffc797dd9d4db"}, # noqa: E501
{"path": "Fold5/vali.txt", "sha256": "dc6083c24a5f0c03df3c91ad3eed7542694115b998acf046e51432cb7a22b848"} # noqa: E501
]
}
splits = {
"train": "train.txt",
"test": "test.txt",
"vali": "vali.txt"
}
def __init__(self, location: str = dataset_dir("MSLR10K"),
split: str = "train", fold: int = 1, normalize: bool = True,
filter_queries: Optional[bool] = None, download: bool = True,
validate_checksums: bool = True):
"""
Args:
location: Directory where the dataset is located.
split: The data split to load ("train", "test" or "vali")
fold: Which data fold to load (1...5)
normalize: Whether to perform query-level feature
normalization.
filter_queries: Whether to filter out queries that
have no relevant items. If not given this will filter queries
for the test set but not the train set.
download: Whether to download the dataset if it does not
exist.
validate_checksums: Whether to validate the dataset files
via sha256.
"""
# Check if specified split and fold exists.
if split not in MSLR10K.splits.keys():
raise ValueError("unrecognized data split '%s'" % str(split))
if fold not in MSLR10K.per_fold_expected_files.keys():
raise ValueError("unrecognized data fold '%s'" % str(fold))
# Validate dataset exists and is correct, or download it.
validate_and_download(
location=location,
expected_files=MSLR10K.per_fold_expected_files[fold],
downloader=MSLR10K.downloader if download else None,
validate_checksums=validate_checksums)
# Only filter queries on non-train splits.
if filter_queries is None:
filter_queries = False if split == "train" else True
# Initialize the dataset.
datafile = os.path.join(location, "Fold%d" % fold,
MSLR10K.splits[split])
super().__init__(file=datafile, sparse=False, normalize=normalize,
filter_queries=filter_queries, zero_based="auto")
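# A minimal usage sketch (not part of the original module); note that the first
# call downloads and extracts the full MSLR-WEB10K archive into the default
# dataset directory, which can take a while:
#
#     train = MSLR10K(split="train", fold=1)
#     vali = MSLR10K(split="vali", fold=1, download=False)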
| 51.145631 | 132 | 0.658314 | [
"MIT"
] | SuperXiang/pytorchltr | pytorchltr/datasets/svmrank/mslr10k.py | 5,268 | Python |
from setuptools import find_packages, setup
setup(
name="src",
packages=find_packages(),
version="0.3.0",
description="Estimating the impact of COVID policy on disease spread",
author="Global Policy Lab",
license="MIT",
)
| 22.454545 | 74 | 0.688259 | [
"MIT"
] | fergald/gpl-covid | code/setup.py | 247 | Python |
# -*- coding: utf-8 -*-
import zeit.wochenende.interfaces
import zeit.wochenende.testing
from zeit.cms.testcontenttype.testcontenttype import ExampleContentType
class SectionTest(zeit.wochenende.testing.FunctionalTestCase):
def test_zwe_ressort_content_is_zwe_content(self):
content = ExampleContentType()
content.ressort = u'Wochenende'
self.repository['zwecenterpage'] = content
content = self.repository['zwecenterpage']
self.assertTrue(zeit.wochenende.interfaces.IZWEContent.providedBy(
content))
| 35 | 74 | 0.744643 | [
"BSD-3-Clause"
] | rickdg/vivi | core/src/zeit/wochenende/tests/test_interfaces.py | 560 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os,glob
from setuptools import setup, find_packages
setup(
name = 'qcon',
version = '2.5',
scripts=['qcon/qcon.py'],
packages=['qcon'],
author = 'pawnhearts',
author_email = 'ph@kotchan.org',
description = """qcon is program for hiding/showing terminal emulators(or other software) with a hotkey.""",
long_description = """qcon is program for hiding/showing terminal emulators(or other software) with a hotkey.
Similar to consoles you see in many FPS games.
Unlike similar projects like guake/yakuake you can use any terminal emulator of your choice.
Several terminals(or other software) can be configured.
It's compact and consists of a single file.""",
classifiers=[
'Environment :: X11 Applications',
'Intended Audience :: End Users/Desktop',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: Terminals :: Terminal Emulators/X Terminals',
],
license = "MIT",
url = 'https://github.com/pawnhearts/qcon',
download_url = 'https://github.com/pawnhearts/qcon/archive/master.zip',
keywords = "terminal gtk",
platforms = "POSIX",
maintainer = 'pawnhearts',
maintainer_email = 'ph@kotchan.org',
)
| 32.871795 | 113 | 0.673167 | [
"MIT"
] | pawnhearts/qcon | setup.py | 1,282 | Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = "Ricardo Ribeiro"
__credits__ = ["Ricardo Ribeiro"]
__license__ = "MIT"
__version__ = "0.0"
__maintainer__ = "Ricardo Ribeiro"
__email__ = "ricardojvr@gmail.com"
__status__ = "Development"
import pyforms.Utils.tools as tools
from PyQt4 import uic, QtGui, QtCore
from pyforms.gui.Controls.ControlBase import ControlBase
class ControlCheckBox(ControlBase):
def initForm(self):
control_path = tools.getFileInSameDirectory(__file__,"checkbox.ui")
self._form = uic.loadUi( control_path )
self._form.checkBox.setText(self._label)
self._form.checkBox.stateChanged.connect(self.__checkedToggle)
self._form.checkBox.stateChangedFname = None
if self._value and self._value!='':
self._form.checkBox.setCheckState( QtCore.Qt.Checked )
else:
self._form.checkBox.setCheckState( QtCore.Qt.Unchecked )
def __checkedToggle(self):
func_name = self._form.checkBox.stateChangedFname
if callable(func_name):
try:
func_name()
except:
import sys
print sys.exc_info()[0]
self.changed()
def load(self, data):
if 'value' in data:
self._form.checkBox.setChecked( data['value']=='True' )
def save(self, data):
data['value'] = str( self.isChecked() )
@property
def value(self): return self._form.checkBox.isChecked()
@value.setter
def value(self, value):
ControlBase.value.fset(self,value)
self._form.checkBox.setChecked(value)
@property
def stateChangedFunction(self):
return self._form.checkBox.stateChangedFname
@stateChangedFunction.setter
def stateChangedFunction(self, value):
self._form.checkBox.stateChangedFname = value
| 27.060606 | 75 | 0.68533 | [
"MIT"
] | sunj1/my_pyforms | pyforms/gui/Controls/ControlCheckBox.py | 1,786 | Python |
from django.urls import path
from . import views
urlpatterns = [
path('', views.post, name='post'),
path('post/<int:pk>/', views.detail, name='detail'),
path('post/new', views.new_post, name='new_post'),
path('post/<int:pk>/edit/', views.edit_post, name='edit_post'),
path('drafts/', views.draft_post, name='draft_post'),
path('post/<int:pk>/publish', views.publish_post, name='publish_post'),
path('post/<int:pk>/delete/', views.delete_post, name='delete_post'),
] | 39.5 | 72 | 0.689873 | [
"Apache-2.0"
] | MiKueen/Django | bloggers/blog/urls.py | 474 | Python |
# Import TestCase
from flask_testing import TestCase
# Import our app from our application's main module
from main import app
from flask import current_app, url_for
# Create a new class called MainTest
class MainTest(TestCase):
    # Create the create_app method, which returns a Flask application
def create_app(self):
        # Configure our app for testing so that Flask knows it is running in a test environment
app.config['TESTING'] = True
        # Indicate that we will not use the Cross-Site Request Forgery token,
        # because in this case there is no active user session.
app.config['WTF_CSRF_ENABLED'] = False
app.config['PRESERVE_CONTEXT_ON_EXCEPTION'] = False
return app
    # Check that our Flask app actually exists
def test_app_exist(self):
self.assertIsNotNone(current_app)
    # Check that our Flask app is in testing mode
def test_app_in_test_mode(self):
self.assertTrue(current_app.config['TESTING'])
    # Check that the index redirect is correct
def test_index_redirect(self):
response = self.client.get(url_for('index'))
self.assertRedirects(response, url_for('Hello'))
    # Check that Hello returns 200 when we make a get() request
def test_hello_get(self):
response = self.client.get(url_for('Hello'))
self.assert200(response)
    # Check how to perform a POST request correctly
def test_hello_post(self):
        # Generate a response by posting to the Hello view
response = self.client.post(url_for('Hello'))
        # And assert that the response status code for this request is 405
        self.assertEqual(response.status_code, 405)
    # Test to check that the blueprint exists
def test_auth_blueprint_exists(self):
self.assertIn('auth', self.app.blueprints)
    # Check that the login response is a 200
def test_auth_login_get(self):
        # In this case we go to login in the auth blueprint
response = self.client.get(url_for('auth.login'))
self.assert200(response)
    # Check that the correct template was rendered
def test_auth_login_template(self):
        # In this case we go to login in the auth blueprint
self.client.get(url_for('auth.login'))
self.assertTemplateUsed('login.html')
def test_auth_login_post(self):
        # In this case we need to create a fake form for the form fields
fake_form = {
'username': 'Fakeusername',
'password': 'Fakepassword'
}
response = self.client.post(url_for('auth.login'), data=fake_form)
self.assertRedirects(response, url_for('index')) | 42.191176 | 116 | 0.699895 | [
"MIT"
] | NicolasImplant/python-flask | tests/test_base.py | 2,870 | Python |
import codecs
from setuptools import setup
with codecs.open('README.rst', encoding='utf-8') as f:
long_description = f.read()
setup(
name="shadowsocksr-cli",
version="2.1.1",
author="tyrantlucifer",
author_email="tyrantlucifer@gmail.com",
description="The command client of shadowsocksr",
url="https://github.com/tyrantlucifer/ssr-command-client",
packages=[
"shadowsocksr_cli",
"shadowsocksr_cli.shadowsocks",
"shadowsocksr_cli.shadowsocks.crypto",
"shadowsocksr_cli.shadowsocks.obfsplugin"
],
entry_points={
'console_scripts': [
'shadowsocksr-cli = shadowsocksr_cli.main:main'
]
},
install_requires=[
"requests",
"prettytable",
"PySocks",
"qrcode",
"pyyaml",
"colorama"
],
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Internet :: Proxy Servers',
],
long_description=long_description,
)
| 30.833333 | 70 | 0.602027 | [
"MIT"
] | weakeng/ssr-command-client | setup.py | 1,480 | Python |
import re
import pygtrie as trie # type: ignore
from functools import reduce
__ALL__ = ('PrefixTokenizers', 'PrefixSearch')
class PrefixTokenizers:
# This string here is just an arbitrary long string so that
# re.split finds no matches and returns the entire phrase
ENTIRE_PHRASE = '::gooey/tokenization/entire-phrase'
# \s == any whitespace character
WORDS = r'\s'
@classmethod
def REGEX(cls, expression):
return expression
class OperatorType:
AND = 'AND'
OR = 'OR'
class SearchOptions:
def __init__(self,
choice_tokenizer=PrefixTokenizers.ENTIRE_PHRASE,
input_tokenizer=PrefixTokenizers.ENTIRE_PHRASE,
ignore_case=True,
operator='AND',
index_suffix= False,
**kwargs):
self.choice_tokenizer = choice_tokenizer
self.input_tokenizer = input_tokenizer
self.ignore_case = ignore_case
self.operator = operator
self.index_suffix = index_suffix
class PrefixSearch(object):
"""
A trie backed index for quickly finding substrings
in a list of options.
"""
def __init__(self, choices, options={}, *args, **kwargs):
self.choices = sorted(filter(None, choices))
self.options: SearchOptions = SearchOptions(**options)
self.searchtree = self.buildSearchTrie(choices)
def updateChoices(self, choices):
self.choices = sorted(filter(None, choices))
self.searchtree = self.buildSearchTrie(choices)
def findMatches(self, token):
if not token:
return sorted(self.choices)
tokens = self.tokenizeInput(token)
matches = [set(flatten(self._vals(self.searchtree, prefix=t))) for t in tokens]
op = intersection if self.options.operator == 'AND' else union
return sorted(reduce(op, matches))
def tokenizeInput(self, token):
"""
Cleans and tokenizes the user's input.
empty characters and spaces are trimmed to prevent
matching all paths in the index.
"""
return list(filter(None, re.split(self.options.input_tokenizer, self.clean(token))))
def tokenizeChoice(self, choice):
"""
Splits the `choice` into a series of tokens based on
the user's criteria.
If suffix indexing is enabled, the individual tokens
are further broken down and indexed by their suffix offsets. e.g.
'Banana', 'anana', 'nana', 'ana'
"""
choice_ = self.clean(choice)
tokens = re.split(self.options.choice_tokenizer, choice_)
if self.options.index_suffix:
return [token[i:]
for token in tokens
for i in range(len(token) - 2)]
else:
return tokens
def clean(self, text):
return text.lower() if self.options.ignore_case else text
def buildSearchTrie(self, choices):
searchtrie = trie.Trie()
for choice in choices:
for token in self.tokenizeChoice(choice):
if not searchtrie.has_key(token):
searchtrie[token] = []
searchtrie[token].append(choice)
return searchtrie
def _vals(self, searchtrie, **kwargs):
try:
return searchtrie.values(**kwargs)
except KeyError:
return []
def intersection(a, b):
return a.intersection(b)
def union(a, b):
return a.union(b)
def flatten(xs):
return [item for x in xs for item in x]
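# A small self-contained sketch (not part of Gooey): build an index over a few
# choices with the default SearchOptions and query it by prefix. Matching is
# case-insensitive by default and an empty query returns every choice, sorted.
if __name__ == "__main__":
    search = PrefixSearch(["Banana", "Apple", "Apricot"])
    print(search.findMatches("ap"))  # -> ['Apple', 'Apricot']
    print(search.findMatches(""))    # -> ['Apple', 'Apricot', 'Banana']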
| 29.438017 | 92 | 0.618754 | [
"MIT"
] | QuantumSpatialInc/Gooey | gooey/gui/components/filtering/prefix_filter.py | 3,562 | Python |
from chainer import backend
from chainer import function
from chainer import utils
from chainer.utils import type_check
class DeCov(function.Function):
"""DeCov loss (https://arxiv.org/abs/1511.06068)"""
def __init__(self, reduce='half_squared_sum'):
self.h_centered = None
self.covariance = None
if reduce not in ('half_squared_sum', 'no'):
raise ValueError(
"only 'half_squared_sum' and 'no' are valid "
"for 'reduce', but '%s' is given" % reduce)
self.reduce = reduce
def check_type_forward(self, in_types):
type_check._argname(in_types, ('h',))
h_type, = in_types
type_check.expect(
h_type.dtype.kind == 'f',
h_type.ndim == 2,
)
def forward(self, inputs):
xp = backend.get_array_module(*inputs)
h, = inputs
self.h_centered = h - h.mean(axis=0, keepdims=True)
self.covariance = self.h_centered.T.dot(self.h_centered)
xp.fill_diagonal(self.covariance, 0.0)
self.covariance /= len(h)
if self.reduce == 'half_squared_sum':
cost = xp.vdot(self.covariance, self.covariance)
cost *= h.dtype.type(0.5)
return utils.force_array(cost),
else:
return self.covariance,
def backward(self, inputs, grad_outputs):
xp = backend.get_array_module(*inputs)
h, = inputs
gcost, = grad_outputs
gcost_div_n = gcost / gcost.dtype.type(len(h))
if self.reduce == 'half_squared_sum':
gh = 2.0 * self.h_centered.dot(self.covariance)
gh *= gcost_div_n
else:
xp.fill_diagonal(gcost_div_n, 0.0)
gh = self.h_centered.dot(gcost_div_n + gcost_div_n.T)
return gh,
def decov(h, reduce='half_squared_sum'):
"""Computes the DeCov loss of ``h``
The output is a variable whose value depends on the value of
the option ``reduce``. If it is ``'no'``, it holds a matrix
whose size is same as the number of columns of ``y``.
If it is ``'half_squared_sum'``, it holds the half of the
squared Frobenius norm (i.e. squared of the L2 norm of a matrix flattened
to a vector) of the matrix.
Args:
h (:class:`~chainer.Variable` or :ref:`ndarray`):
Variable holding a matrix where the first dimension
corresponds to the batches.
        reduce (str): Reduction option. Its value must be either
``'half_squared_sum'`` or ``'no'``.
Otherwise, :class:`ValueError` is raised.
Returns:
~chainer.Variable:
A variable holding a scalar of the DeCov loss.
If ``reduce`` is ``'no'``, the output variable holds
2-dimensional array matrix of shape ``(N, N)`` where
``N`` is the number of columns of ``y``.
If it is ``'half_squared_sum'``, the output variable
holds a scalar value.
.. note::
See https://arxiv.org/abs/1511.06068 for details.
"""
return DeCov(reduce)(h)
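# A minimal sketch (not part of Chainer): the DeCov loss of a random activation
# matrix with 8 rows (batch) and 5 columns (hidden units), once reduced to a
# scalar and once as the full (5, 5) cross-covariance matrix.
if __name__ == "__main__":
    import numpy as np
    h = np.random.randn(8, 5).astype(np.float32)
    print(decov(h))
    print(decov(h, reduce='no').shape)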
| 33.89011 | 77 | 0.597601 | [
"MIT"
] | Anyz01/chainer | chainer/functions/loss/decov.py | 3,084 | Python |
def my_func1(x: int, y: int) -> int:
...
bit: BitVec[1]
myu32: BitVec[32]
myu64: BitVec[64]
| 12.25 | 36 | 0.591837 | [
"MIT"
] | MiguelMarcelino/py2many | tests/cases/smt_types.py | 98 | Python |
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu Nguyen" at 15:39, 20/04/2020 %
# %
# Email: nguyenthieu2102@gmail.com %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieu1995 %
#-------------------------------------------------------------------------------------------------------%
from opfunu.cec.cec2005.root import Root
from numpy import sum, dot, cos, exp, pi, e, sqrt
class Model(Root):
def __init__(self, f_name="Shifted Rotated Ackley's Function with Global Optimum on Bounds", f_shift_data_file="data_ackley",
f_ext='.txt', f_bias=-140, f_matrix=None):
Root.__init__(self, f_name, f_shift_data_file, f_ext, f_bias)
self.f_matrix = f_matrix
def _main__(self, solution=None):
problem_size = len(solution)
if problem_size > 100:
print("CEC 2005 not support for problem size > 100")
return 1
if problem_size == 10 or problem_size == 30 or problem_size == 50:
self.f_matrix = "ackley_M_D" + str(problem_size)
else:
print("CEC 2005 F8 function only support problem size 10, 30, 50")
return 1
shift_data = self.load_shift_data()[:problem_size]
t1 = int(problem_size/2)
for j in range(0, t1-1):
shift_data[2*(j+1)-1] = -32 * shift_data[2*(j+1)]
matrix = self.load_matrix_data(self.f_matrix)
z = dot((solution - shift_data), matrix)
result = -20 * exp(-0.2 * sum(z ** 2) / problem_size) - exp(sum(cos(2 * pi * z))) + 20 + e
return result + self.f_bias
| 46.318182 | 129 | 0.459274 | [
"MIT"
] | ElliottP-13/opfunu | opfunu/cec/cec2005/F8.py | 2,038 | Python |
from data.indicator.SE2.process import WB_computation_config, SDG_computation_config
| 42.5 | 84 | 0.894118 | [
"MIT"
] | simonzabrocki/Anticipe | data/indicator/SE2/__init__.py | 85 | Python |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for source_remote."""
import os
import traceback
import grpc
from tensorflow.core.debug import debug_service_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import grpc_debug_test_server
from tensorflow.python.debug.lib import source_remote
from tensorflow.python.debug.lib import source_utils
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.util import tf_inspect
def line_number_above():
return tf_inspect.stack()[1][2] - 1
class SendTracebacksTest(test_util.TensorFlowTestCase):
@classmethod
def setUpClass(cls):
test_util.TensorFlowTestCase.setUpClass()
(cls._server_port, cls._debug_server_url, cls._server_dump_dir,
cls._server_thread,
cls._server) = grpc_debug_test_server.start_server_on_separate_thread(
poll_server=True)
cls._server_address = "localhost:%d" % cls._server_port
(cls._server_port_2, cls._debug_server_url_2, cls._server_dump_dir_2,
cls._server_thread_2,
cls._server_2) = grpc_debug_test_server.start_server_on_separate_thread()
cls._server_address_2 = "localhost:%d" % cls._server_port_2
cls._curr_file_path = os.path.normpath(os.path.abspath(__file__))
@classmethod
def tearDownClass(cls):
# Stop the test server and join the thread.
cls._server.stop_server().wait()
cls._server_thread.join()
cls._server_2.stop_server().wait()
cls._server_thread_2.join()
test_util.TensorFlowTestCase.tearDownClass()
def tearDown(self):
ops.reset_default_graph()
self._server.clear_data()
self._server_2.clear_data()
super(SendTracebacksTest, self).tearDown()
def _findFirstTraceInsideTensorFlowPyLibrary(self, op):
"""Find the first trace of an op that belongs to the TF Python library."""
for trace in op.traceback:
if source_utils.guess_is_tensorflow_py_library(trace.filename):
return trace
def testSendGraphTracebacksToSingleDebugServer(self):
this_func_name = "testSendGraphTracebacksToSingleDebugServer"
with session.Session() as sess:
a = variables.Variable(21.0, name="a")
a_lineno = line_number_above()
b = variables.Variable(2.0, name="b")
b_lineno = line_number_above()
math_ops.add(a, b, name="x")
x_lineno = line_number_above()
send_stack = traceback.extract_stack()
send_lineno = line_number_above()
source_remote.send_graph_tracebacks(
self._server_address, "dummy_run_key", send_stack, sess.graph)
tb = self._server.query_op_traceback("a")
self.assertIn((self._curr_file_path, a_lineno, this_func_name), tb)
tb = self._server.query_op_traceback("b")
self.assertIn((self._curr_file_path, b_lineno, this_func_name), tb)
tb = self._server.query_op_traceback("x")
self.assertIn((self._curr_file_path, x_lineno, this_func_name), tb)
self.assertIn(
(self._curr_file_path, send_lineno, this_func_name),
self._server.query_origin_stack()[-1])
self.assertEqual(
" a = variables.Variable(21.0, name=\"a\")",
self._server.query_source_file_line(__file__, a_lineno))
      # Files in the TensorFlow code base should not have been sent.
tf_trace = self._findFirstTraceInsideTensorFlowPyLibrary(a.op)
tf_trace_file_path = tf_trace.filename
with self.assertRaises(ValueError):
self._server.query_source_file_line(tf_trace_file_path, 0)
self.assertEqual([debug_service_pb2.CallTraceback.GRAPH_EXECUTION],
self._server.query_call_types())
self.assertEqual(["dummy_run_key"], self._server.query_call_keys())
self.assertEqual(
[sess.graph.version], self._server.query_graph_versions())
def testSendGraphTracebacksToTwoDebugServers(self):
this_func_name = "testSendGraphTracebacksToTwoDebugServers"
with session.Session() as sess:
a = variables.Variable(21.0, name="two/a")
a_lineno = line_number_above()
b = variables.Variable(2.0, name="two/b")
b_lineno = line_number_above()
x = math_ops.add(a, b, name="two/x")
x_lineno = line_number_above()
send_traceback = traceback.extract_stack()
send_lineno = line_number_above()
with test.mock.patch.object(
grpc, "insecure_channel",
wraps=grpc.insecure_channel) as mock_grpc_channel:
source_remote.send_graph_tracebacks(
[self._server_address, self._server_address_2],
"dummy_run_key", send_traceback, sess.graph)
mock_grpc_channel.assert_called_with(
test.mock.ANY,
options=[("grpc.max_receive_message_length", -1),
("grpc.max_send_message_length", -1)])
servers = [self._server, self._server_2]
for server in servers:
tb = server.query_op_traceback("two/a")
self.assertIn((self._curr_file_path, a_lineno, this_func_name), tb)
tb = server.query_op_traceback("two/b")
self.assertIn((self._curr_file_path, b_lineno, this_func_name), tb)
tb = server.query_op_traceback("two/x")
self.assertIn((self._curr_file_path, x_lineno, this_func_name), tb)
self.assertIn(
(self._curr_file_path, send_lineno, this_func_name),
server.query_origin_stack()[-1])
self.assertEqual(
" x = math_ops.add(a, b, name=\"two/x\")",
server.query_source_file_line(__file__, x_lineno))
tf_trace = self._findFirstTraceInsideTensorFlowPyLibrary(a.op)
tf_trace_file_path = tf_trace.filename
with self.assertRaises(ValueError):
server.query_source_file_line(tf_trace_file_path, 0)
self.assertEqual([debug_service_pb2.CallTraceback.GRAPH_EXECUTION],
server.query_call_types())
self.assertEqual(["dummy_run_key"], server.query_call_keys())
self.assertEqual([sess.graph.version], server.query_graph_versions())
def testSendEagerTracebacksToSingleDebugServer(self):
this_func_name = "testSendEagerTracebacksToSingleDebugServer"
send_traceback = traceback.extract_stack()
send_lineno = line_number_above()
source_remote.send_eager_tracebacks(self._server_address, send_traceback)
self.assertEqual([debug_service_pb2.CallTraceback.EAGER_EXECUTION],
self._server.query_call_types())
self.assertIn((self._curr_file_path, send_lineno, this_func_name),
self._server.query_origin_stack()[-1])
def testGRPCServerMessageSizeLimit(self):
"""Assert gRPC debug server is started with unlimited message size."""
with test.mock.patch.object(
grpc, "server", wraps=grpc.server) as mock_grpc_server:
(_, _, _, server_thread,
server) = grpc_debug_test_server.start_server_on_separate_thread(
poll_server=True)
mock_grpc_server.assert_called_with(
test.mock.ANY,
options=[("grpc.max_receive_message_length", -1),
("grpc.max_send_message_length", -1)])
server.stop_server().wait()
server_thread.join()
if __name__ == "__main__":
googletest.main()
| 42.056122 | 88 | 0.711998 | ["Apache-2.0"] | 05259/tensorflow | tensorflow/python/debug/lib/source_remote_test.py | 8,243 | Python |
# authors_name = 'Preetham Ganesh'
# project_title = 'Multi Sensor-based Human Activity Recognition using OpenCV and Sensor Fusion'
# email = 'preetham.ganesh2015@gmail.com'
import numpy as np
import os
import pandas as pd
import itertools
import logging
import sklearn.pipeline
from sklearn.metrics import accuracy_score
from sklearn.metrics import balanced_accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import ParameterGrid
logging.getLogger('sklearn').setLevel(logging.FATAL)
def list_combinations_generator(modalities: list):
"""Generates combinations for items in the given list.
Args:
modalities: List of modalities available in the dataset.
Returns:
Combinations of items in the given list.
"""
modality_combinations = list()
# Iterates across modalities to generate combinations based on length.
for length in range(1, len(modalities) + 1):
# Generate combinations for the current length.
current_length_combinations = itertools.combinations(modalities, length)
# Iterates across the generated combinations to convert it into a list.
for combination in current_length_combinations:
current_combination_list = list()
for k in combination:
current_combination_list.append(k)
modality_combinations.append(current_combination_list)
return modality_combinations
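# Illustrative check (editor addition, not part of the original pipeline): for the three
# modalities used in this project, the generator yields every non-empty subset,
# i.e. 2 ** 3 - 1 = 7 combinations.
def _demo_list_combinations():
    combinations = list_combinations_generator(['rgb', 'depth', 'inertial'])
    # Expected output:
    # [['rgb'], ['depth'], ['inertial'], ['rgb', 'depth'], ['rgb', 'inertial'],
    #  ['depth', 'inertial'], ['rgb', 'depth', 'inertial']]
    print(combinations)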
def data_combiner(n_actions: int,
subject_ids: list,
n_takes: int,
modalities: list,
skeleton_pose_model: str):
"""Combines skeleton point information for all actions, all takes, given list of subject ids and given list of
modalities.
Args:
n_actions: Total number of actions in the original dataset.
subject_ids: List of subjects in the current set.
n_takes: Total number of takes in the original dataset.
modalities: Current combination of modalities.
skeleton_pose_model: Current skeleton pose model name which will be used to import skeleton point
information.
Returns:
A pandas dataframe which contains combined skeleton point information for all actions, all takes, given list
of subject ids and given list of modalities.
"""
combined_modality_skeleton_information = pd.DataFrame()
# Iterates across actions, subject_ids, takes, and modalities to combine skeleton point information.
for i in range(1, n_actions + 1):
for j in range(len(subject_ids)):
for k in range(1, n_takes + 1):
data_name = 'a{}_s{}_t{}'.format(i, subject_ids[j], k)
# Iterates across modalities to import skeleton point information file and adds it to
# combined_modality_skeleton_information. If file not found, it moves on to the next combination.
try:
# Imports 1st modality's skeleton point information for current data_name and skeleton_pose_model.
current_data_name_modality_information = pd.read_csv('../data/normalized_data/{}/{}_{}.csv'.format(
modalities[0], data_name, skeleton_pose_model))
except FileNotFoundError:
continue
                # Since the number of modalities differs across combinations, when more than one modality is
                # present, the skeleton point information for the remaining modalities is merged into the
                # skeleton point information imported for the first modality.
if len(modalities) != 1:
for m in range(1, len(modalities)):
current_skeleton_point_information = pd.read_csv('../data/normalized_data/{}/{}_{}.csv'.format(
modalities[m], data_name, skeleton_pose_model))
current_data_name_modality_information = pd.merge(current_data_name_modality_information,
current_skeleton_point_information,
on='frame', how='outer')
# Adds data_name to the imported skeleton point information.
current_data_name_modality_information['data_name'] = [data_name for _ in range(len(
current_data_name_modality_information))]
# Removes frame column from the imported skeleton point information.
current_data_name_modality_information = current_data_name_modality_information.drop(columns=['frame'])
# Adds action column to the imported skeleton point information.
current_data_name_modality_information['action'] = [i for _ in range(len(
current_data_name_modality_information))]
# Appends currently imported & modified skeleton point information to the combined modality skeleton
# point information
combined_modality_skeleton_information = combined_modality_skeleton_information.append(
current_data_name_modality_information)
return combined_modality_skeleton_information
def calculate_metrics(actual_values: np.ndarray,
predicted_values: np.ndarray):
"""Using actual_values, predicted_values calculates metrics such as accuracy, balanced accuracy, precision, recall,
and f1 scores.
Args:
actual_values: Actual action labels in the dataset
predicted_values: Action labels predicted by the currently trained model
Returns:
Dictionary contains keys as score names and values as scores which are floating point values.
"""
return {'accuracy_score': round(accuracy_score(actual_values, predicted_values) * 100, 3),
'balanced_accuracy_score': round(balanced_accuracy_score(actual_values, predicted_values) * 100, 3),
'precision_score': round(precision_score(actual_values, predicted_values, average='weighted',
labels=np.unique(predicted_values)) * 100, 3),
'recall_score': round(recall_score(actual_values, predicted_values, average='weighted',
labels=np.unique(predicted_values)) * 100, 3),
'f1_score': round(f1_score(actual_values, predicted_values, average='weighted',
labels=np.unique(predicted_values)) * 100, 3)}
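# Minimal usage sketch (editor addition, hypothetical labels): with 3 of 4 frames
# classified correctly, accuracy_score is 75.0; the remaining keys hold the weighted
# precision, recall, and f1 scores computed by scikit-learn.
def _demo_calculate_metrics():
    actual = np.array([1, 2, 2, 3])
    predicted = np.array([1, 2, 1, 3])
    scores = calculate_metrics(actual, predicted)
    print(scores['accuracy_score'])  # 75.0
    print(sorted(scores.keys()))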
def retrieve_hyperparameters(current_model_name: str):
"""Based on the current_model_name returns a list of hyperparameters used for optimizing the model (if necessary).
Args:
current_model_name: Name of the model currently expected to be trained
Returns:
A dictionary containing the hyperparameter name and the values that will be used to optimize the model
"""
# For support_vector_classifier, the hyperparameter tuned is kernel.
if current_model_name == 'support_vector_classifier':
parameters = {'kernel': ['linear', 'poly', 'rbf']}
# For decision_tree_classifier, the hyperparameters tuned are criterion, splitter, and max_depth.
elif current_model_name == 'decision_tree_classifier':
parameters = {'criterion': ['gini', 'entropy'], 'splitter': ['best', 'random'], 'max_depth': [2, 3, 4, 5, 6, 7]}
# For random_forest_classifier or extra_trees_classifier, the hyperparameters tuned are n_estimators, criterion, and
# max_depth
elif current_model_name == 'random_forest_classifier' or current_model_name == 'extra_trees_classifier':
parameters = {'n_estimators': [i * 10 for i in range(2, 11, 2)], 'criterion': ['gini', 'entropy'],
'max_depth': [2, 3, 4, 5, 6, 7]}
    # For gradient_boosting_classifier, the hyperparameters tuned are n_estimators and max_depth.
elif current_model_name == 'gradient_boosting_classifier':
parameters = {'max_depth': [2, 3, 4, 5, 6, 7], 'n_estimators': [i * 10 for i in range(2, 11, 2)]}
# For gaussian_naive_bayes, none of the hyperparameters are tuned.
else:
parameters = {'None': ['None']}
return parameters
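# Illustrative sketch (editor addition): the returned dictionary is expanded with
# sklearn's ParameterGrid, so e.g. the decision tree grid of 2 criteria x 2 splitters
# x 6 depths yields 24 hyperparameter combinations to train and evaluate.
def _demo_retrieve_hyperparameters():
    parameters = retrieve_hyperparameters('decision_tree_classifier')
    grid = ParameterGrid(parameters)
    print(len(grid))  # 24
    print(grid[0])    # one concrete combination of criterion, max_depth, and splitter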
def split_data_input_target(skeleton_data: pd.DataFrame):
"""Splits skeleton_data into input and target datasets by filtering / selecting certain columns.
Args:
skeleton_data: Train / Validation / Test dataset used to split / filter certain columns.
Returns:
A tuple containing 2 numpy ndarrays for the input and target datasets.
"""
skeleton_data_input = skeleton_data.drop(columns=['data_name', 'action'])
skeleton_data_target = skeleton_data['action']
return np.array(skeleton_data_input), np.array(skeleton_data_target)
def video_based_model_testing(test_skeleton_information: pd.DataFrame,
current_model: sklearn):
"""Tests performance of the currently trained model on the validation or testing sets, where the performance is
evaluated per video / file, instead of evaluating per frame.
Args:
test_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the validation or testing sets.
current_model: Scikit-learn model that is currently being trained and tested.
Returns:
A tuple contains the target and predicted action for each video in the validation / testing set.
"""
# Identifies unique data_names in the validation / testing set.
test_data_names = np.unique(test_skeleton_information['data_name'])
test_target_data = []
test_predicted_data = []
# Iterates across the identified unique data names
for i in range(len(test_data_names)):
# Filters skeleton point information for the current data name.
current_data_name_skeleton_information = test_skeleton_information[test_skeleton_information['data_name'] ==
test_data_names[i]]
# Splits filtered skeleton point information into input and target data.
test_skeleton_input_data, test_skeleton_target_data = split_data_input_target(
current_data_name_skeleton_information)
# Predicts labels for each frame in the filtered skeleton point information.
test_skeleton_predicted_data = list(current_model.predict(test_skeleton_input_data))
        # Identifies which predicted label has the highest count and appends it to the final predicted data. Also, appends
# target label to the target data.
test_target_data.append(max(current_data_name_skeleton_information['action']))
test_predicted_data.append(max(test_skeleton_predicted_data, key=test_skeleton_predicted_data.count))
return np.array(test_target_data), np.array(test_predicted_data)
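# Worked example of the per-video voting step above (editor addition): the video-level
# prediction is simply the most frequent frame-level label.
def _demo_majority_vote():
    frame_predictions = [3, 3, 7, 3, 7]
    video_prediction = max(frame_predictions, key=frame_predictions.count)
    print(video_prediction)  # 3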
def model_training_testing(train_skeleton_information: pd.DataFrame,
validation_skeleton_information: pd.DataFrame,
test_skeleton_information: pd.DataFrame,
current_model_name: str,
parameters: dict):
"""Trains and validates model for the current model name and hyperparameters on the train_skeleton_informaiton and
validation_skeleton_information.
Args:
train_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the Training set.
validation_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the Validation set.
test_skeleton_information: Pandas dataframe which contains skeleton point information for all actions,
subject_ids, and takes in the Test set.
current_model_name: Name of the model currently expected to be trained.
parameters: Current parameter values used for training and validating the model.
Returns:
A tuple which contains the training metrics, validation metrics, & test metrics.
"""
# Based on the current_model_name, the scikit-learn object is initialized using the hyperparameter (if necessary)
if current_model_name == 'support_vector_classifier':
model = SVC(kernel=parameters['kernel'])
elif current_model_name == 'decision_tree_classifier':
model = DecisionTreeClassifier(criterion=parameters['criterion'], splitter=parameters['splitter'],
max_depth=parameters['max_depth'])
elif current_model_name == 'random_forest_classifier':
model = RandomForestClassifier(n_estimators=parameters['n_estimators'], criterion=parameters['criterion'],
max_depth=parameters['max_depth'])
elif current_model_name == 'extra_trees_classifier':
model = ExtraTreesClassifier(n_estimators=parameters['n_estimators'], criterion=parameters['criterion'],
max_depth=parameters['max_depth'])
elif current_model_name == 'gradient_boosting_classifier':
model = GradientBoostingClassifier(n_estimators=parameters['n_estimators'], max_depth=parameters['max_depth'])
else:
model = GaussianNB()
# Splits Training skeleton information into input and target data.
train_skeleton_input_data, train_skeleton_target_data = split_data_input_target(train_skeleton_information)
# Trains the object created for the model using the training input and target.
model.fit(train_skeleton_input_data, train_skeleton_target_data)
# Predict video based action labels for training and validation skeleton information data.
train_skeleton_target_data, train_skeleton_predicted_data = video_based_model_testing(train_skeleton_information,
model)
validation_skeleton_target_data, validation_skeleton_predicted_data = video_based_model_testing(
validation_skeleton_information, model)
test_skeleton_target_data, test_skeleton_predicted_data = video_based_model_testing(test_skeleton_information, model)
# Calculates metrics for the predicted action labels for the training and testing sets.
train_metrics = calculate_metrics(train_skeleton_target_data, train_skeleton_predicted_data)
validation_metrics = calculate_metrics(validation_skeleton_target_data, validation_skeleton_predicted_data)
test_metrics = calculate_metrics(test_skeleton_target_data, test_skeleton_predicted_data)
return train_metrics, validation_metrics, test_metrics
def per_combination_results_export(combination_name: str,
data_split: str,
metrics_dataframe: pd.DataFrame):
"""Exports the metrics_dataframe into a CSV format to the mentioned data_split folder. If the folder does not exist,
then the folder is created.
Args:
combination_name: Name of the current combination of modalities and skeleton pose model.
data_split: Name of the split the subset of the dataset belongs to.
metrics_dataframe: A dataframe containing the mean of all the metrics for all the hyperparameters & models.
Returns:
None.
"""
directory_path = '{}/{}'.format('../results/combination_results', combination_name)
if not os.path.isdir(directory_path):
os.mkdir(directory_path)
file_path = '{}/{}.csv'.format(directory_path, data_split)
metrics_dataframe.to_csv(file_path, index=False)
def appends_parameter_metrics_combination(current_model_name: str,
current_combination_name: str,
current_split_metrics: dict,
split_metrics_dataframe: pd.DataFrame):
"""Appends the metrics for the current model and current parameter combination to the main dataframe.
Args:
current_model_name: Name of the model currently being trained.
current_combination_name: Current combination of parameters used for training the model.
current_split_metrics: Metrics for the current parameter combination for the model.
split_metrics_dataframe: Pandas dataframe which contains metrics for the current combination of modalities.
Returns:
Updated version of the pandas dataframe which contains metrics for the current combination of modalities.
"""
current_split_metrics['model_names'] = current_model_name
current_split_metrics['parameters'] = current_combination_name
split_metrics_dataframe = split_metrics_dataframe.append(current_split_metrics, ignore_index=True)
return split_metrics_dataframe
def per_combination_model_training_testing(train_subject_ids: list,
validation_subject_ids: list,
test_subject_ids: list,
n_actions: int,
n_takes: int,
current_combination_modalities: list,
skeleton_pose_model: str,
model_names: list):
"""Combines skeleton point information based on modality combination, and subject id group. Trains, validates, and
tests the list of classifier models. Calculates metrics for each data split, model and parameter combination.
Args:
train_subject_ids: List of subject ids in the training set.
validation_subject_ids: List of subject ids in the validation set.
test_subject_ids: List of subject ids in the testing set.
n_actions: Total number of actions in the original dataset.
n_takes: Total number of takes in the original dataset.
current_combination_modalities: Current combination of modalities which will be used to import and combine
the dataset.
skeleton_pose_model: Name of the model currently used for extracting skeleton model.
model_names: List of ML classifier model names which will used creating the objects.
Returns:
None.
"""
# Combines skeleton point information based on modality combination, and subject id group.
train_skeleton_information = data_combiner(n_actions, train_subject_ids, n_takes, current_combination_modalities,
skeleton_pose_model)
validation_skeleton_information = data_combiner(n_actions, validation_subject_ids, n_takes,
current_combination_modalities, skeleton_pose_model)
test_skeleton_information = data_combiner(n_actions, test_subject_ids, n_takes, current_combination_modalities,
skeleton_pose_model)
# Creating empty dataframes for the metrics for current modality combination's training, validation, and testing
# datasets.
metrics_features = ['accuracy_score', 'balanced_accuracy_score', 'precision_score', 'recall_score', 'f1_score']
train_models_parameters_metrics = pd.DataFrame(columns=['model_names', 'parameters'] + metrics_features)
validation_models_parameters_metrics = pd.DataFrame(columns=['model_names', 'parameters'] + metrics_features)
test_models_parameters_metrics = pd.DataFrame(columns=['model_names', 'parameters'] + metrics_features)
combination_name = '_'.join(current_combination_modalities + [skeleton_pose_model])
# Iterates across model names and parameter grid for training and testing the classification models.
for i in range(len(model_names)):
# Retrieves parameters and generates parameter combinations.
parameters = retrieve_hyperparameters(model_names[i])
parameters_grid = ParameterGrid(parameters)
for j in range(len(parameters_grid)):
current_parameters_grid_name = ', '.join(['{}={}'.format(k, parameters_grid[j][k]) for k in
parameters_grid[j].keys()])
# Performs model training and testing. Also, generates metrics for the data splits.
training_metrics, validation_metrics, test_metrics = model_training_testing(
train_skeleton_information, validation_skeleton_information, test_skeleton_information, model_names[i],
parameters_grid[j])
# Appends current modality's train, validation, and test metrics to the main dataframes.
train_models_parameters_metrics = appends_parameter_metrics_combination(
model_names[i], current_parameters_grid_name, training_metrics, train_models_parameters_metrics)
validation_models_parameters_metrics = appends_parameter_metrics_combination(
model_names[i], current_parameters_grid_name, validation_metrics, validation_models_parameters_metrics)
test_models_parameters_metrics = appends_parameter_metrics_combination(
model_names[i], current_parameters_grid_name, test_metrics, test_models_parameters_metrics)
if model_names[i] != 'gaussian_naive_bayes':
print('modality_combination={}, model={}, {} completed successfully.'.format(
combination_name, model_names[i], current_parameters_grid_name))
else:
print('modality_combination={}, model={} completed successfully.'.format(combination_name,
model_names[i]))
# Exports main training, validation and testing metrics into CSV files.
per_combination_results_export('_'.join(current_combination_modalities + [skeleton_pose_model]), 'train_metrics',
train_models_parameters_metrics)
per_combination_results_export('_'.join(current_combination_modalities + [skeleton_pose_model]),
'validation_metrics', validation_models_parameters_metrics)
per_combination_results_export('_'.join(current_combination_modalities + [skeleton_pose_model]), 'test_metrics',
test_models_parameters_metrics)
def main():
print()
n_actions = 27
n_subjects = 8
n_takes = 4
skeleton_pose_models = ['coco', 'mpi']
modalities = ['rgb', 'depth', 'inertial']
model_names = ['gaussian_naive_bayes', 'support_vector_classifier', 'decision_tree_classifier',
'random_forest_classifier', 'extra_trees_classifier', 'gradient_boosting_classifier']
modality_combinations = list_combinations_generator(modalities)
train_subject_ids = [i for i in range(1, n_subjects - 1)]
validation_subject_ids = [n_subjects - 1]
test_subject_ids = [n_subjects]
for i in range(len(modality_combinations)):
for j in range(len(skeleton_pose_models)):
per_combination_model_training_testing(train_subject_ids, validation_subject_ids, test_subject_ids,
n_actions, n_takes, modality_combinations[i],
skeleton_pose_models[j], model_names)
print()
if __name__ == '__main__':
main()
| 55.275 | 121 | 0.675959 | ["MIT"] | preetham-ganesh/multi-sensor-human-activity-recognition | codes/model_training_testing.py | 24,321 | Python |
import cv2
import keras
import math
import matplotlib.pyplot as plt
import numpy as np
import random
import warnings
from generators.utils import get_affine_transform, affine_transform
from generators.utils import gaussian_radius, draw_gaussian, gaussian_radius_2, draw_gaussian_2
class Generator(keras.utils.Sequence):
"""
Abstract generator class.
"""
def __init__(
self,
multi_scale=False,
multi_image_sizes=(320, 352, 384, 416, 448, 480, 512, 544, 576, 608),
misc_effect=None,
visual_effect=None,
batch_size=1,
group_method='ratio', # one of 'none', 'random', 'ratio'
shuffle_groups=True,
input_size=512,
max_objects=100
):
"""
Initialize Generator object.
Args:
batch_size: The size of the batches to generate.
group_method: Determines how images are grouped together (defaults to 'ratio', one of ('none', 'random', 'ratio')).
shuffle_groups: If True, shuffles the groups each epoch.
            input_size: The side length (in pixels) images are resized to before being fed to the network.
            max_objects: The maximum number of ground-truth boxes kept per image.
"""
self.misc_effect = misc_effect
self.visual_effect = visual_effect
self.batch_size = int(batch_size)
self.group_method = group_method
self.shuffle_groups = shuffle_groups
self.input_size = input_size
self.output_size = self.input_size // 4
self.max_objects = max_objects
self.groups = None
self.multi_scale = multi_scale
self.multi_image_sizes = multi_image_sizes
self.current_index = 0
# Define groups
self.group_images()
# Shuffle when initializing
if self.shuffle_groups:
random.shuffle(self.groups)
def on_epoch_end(self):
if self.shuffle_groups:
random.shuffle(self.groups)
self.current_index = 0
def size(self):
"""
Size of the dataset.
"""
raise NotImplementedError('size method not implemented')
def num_classes(self):
"""
Number of classes in the dataset.
"""
raise NotImplementedError('num_classes method not implemented')
def has_label(self, label):
"""
Returns True if label is a known label.
"""
raise NotImplementedError('has_label method not implemented')
def has_name(self, name):
"""
Returns True if name is a known class.
"""
raise NotImplementedError('has_name method not implemented')
def name_to_label(self, name):
"""
Map name to label.
"""
raise NotImplementedError('name_to_label method not implemented')
def label_to_name(self, label):
"""
Map label to name.
"""
raise NotImplementedError('label_to_name method not implemented')
def image_aspect_ratio(self, image_index):
"""
Compute the aspect ratio for an image with image_index.
"""
raise NotImplementedError('image_aspect_ratio method not implemented')
def load_image(self, image_index):
"""
Load an image at the image_index.
"""
raise NotImplementedError('load_image method not implemented')
def load_annotations(self, image_index):
"""
Load annotations for an image_index.
"""
raise NotImplementedError('load_annotations method not implemented')
def load_annotations_group(self, group):
"""
Load annotations for all images in group.
"""
        # load_annotations {'labels': np.array, 'bboxes': np.array}
annotations_group = [self.load_annotations(image_index) for image_index in group]
for annotations in annotations_group:
assert (isinstance(annotations,
dict)), '\'load_annotations\' should return a list of dictionaries, received: {}'.format(
type(annotations))
assert (
'labels' in annotations), '\'load_annotations\' should return a list of dictionaries that contain \'labels\' and \'bboxes\'.'
assert (
'bboxes' in annotations), '\'load_annotations\' should return a list of dictionaries that contain \'labels\' and \'bboxes\'.'
return annotations_group
def filter_annotations(self, image_group, annotations_group, group):
"""
Filter annotations by removing those that are outside of the image bounds or whose width/height < 0.
"""
# test all annotations
for index, (image, annotations) in enumerate(zip(image_group, annotations_group)):
# test x2 < x1 | y2 < y1 | x1 < 0 | y1 < 0 | x2 <= 0 | y2 <= 0 | x2 >= image.shape[1] | y2 >= image.shape[0]
invalid_indices = np.where(
(annotations['bboxes'][:, 2] <= annotations['bboxes'][:, 0]) |
(annotations['bboxes'][:, 3] <= annotations['bboxes'][:, 1]) |
(annotations['bboxes'][:, 0] < 0) |
(annotations['bboxes'][:, 1] < 0) |
(annotations['bboxes'][:, 2] <= 0) |
(annotations['bboxes'][:, 3] <= 0) |
(annotations['bboxes'][:, 2] > image.shape[1]) |
(annotations['bboxes'][:, 3] > image.shape[0])
)[0]
# delete invalid indices
if len(invalid_indices):
warnings.warn('Image with id {} (shape {}) contains the following invalid boxes: {}.'.format(
group[index],
image.shape,
annotations['bboxes'][invalid_indices, :]
))
for k in annotations_group[index].keys():
annotations_group[index][k] = np.delete(annotations[k], invalid_indices, axis=0)
if annotations['bboxes'].shape[0] == 0:
warnings.warn('Image with id {} (shape {}) contains no valid boxes before transform'.format(
group[index],
image.shape,
))
return image_group, annotations_group
def clip_transformed_annotations(self, image_group, annotations_group, group):
"""
Filter annotations by removing those that are outside of the image bounds or whose width/height < 0.
"""
# test all annotations
filtered_image_group = []
filtered_annotations_group = []
for index, (image, annotations) in enumerate(zip(image_group, annotations_group)):
image_height = image.shape[0]
image_width = image.shape[1]
# x1
annotations['bboxes'][:, 0] = np.clip(annotations['bboxes'][:, 0], 0, image_width - 2)
# y1
annotations['bboxes'][:, 1] = np.clip(annotations['bboxes'][:, 1], 0, image_height - 2)
# x2
annotations['bboxes'][:, 2] = np.clip(annotations['bboxes'][:, 2], 1, image_width - 1)
# y2
annotations['bboxes'][:, 3] = np.clip(annotations['bboxes'][:, 3], 1, image_height - 1)
# test x2 < x1 | y2 < y1 | x1 < 0 | y1 < 0 | x2 <= 0 | y2 <= 0 | x2 >= image.shape[1] | y2 >= image.shape[0]
small_indices = np.where(
(annotations['bboxes'][:, 2] - annotations['bboxes'][:, 0] < 10) |
(annotations['bboxes'][:, 3] - annotations['bboxes'][:, 1] < 10)
)[0]
# delete invalid indices
if len(small_indices):
for k in annotations_group[index].keys():
annotations_group[index][k] = np.delete(annotations[k], small_indices, axis=0)
# import cv2
# for invalid_index in small_indices:
# x1, y1, x2, y2 = annotations['bboxes'][invalid_index]
# label = annotations['labels'][invalid_index]
# class_name = self.labels[label]
# print('width: {}'.format(x2 - x1))
# print('height: {}'.format(y2 - y1))
# cv2.rectangle(image, (int(round(x1)), int(round(y1))), (int(round(x2)), int(round(y2))), (0, 255, 0), 2)
# cv2.putText(image, class_name, (int(round(x1)), int(round(y1))), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 1)
# cv2.namedWindow('image', cv2.WINDOW_NORMAL)
# cv2.imshow('image', image)
# cv2.waitKey(0)
if annotations_group[index]['bboxes'].shape[0] != 0:
filtered_image_group.append(image)
filtered_annotations_group.append(annotations_group[index])
else:
warnings.warn('Image with id {} (shape {}) contains no valid boxes after transform'.format(
group[index],
image.shape,
))
return filtered_image_group, filtered_annotations_group
def load_image_group(self, group):
"""
Load images for all images in a group.
"""
return [self.load_image(image_index) for image_index in group]
def random_visual_effect_group_entry(self, image, annotations):
"""
Randomly transforms image and annotation.
"""
# apply visual effect
image = self.visual_effect(image)
return image, annotations
def random_visual_effect_group(self, image_group, annotations_group):
"""
Randomly apply visual effect on each image.
"""
assert (len(image_group) == len(annotations_group))
if self.visual_effect is None:
# do nothing
return image_group, annotations_group
for index in range(len(image_group)):
# apply effect on a single group entry
image_group[index], annotations_group[index] = self.random_visual_effect_group_entry(
image_group[index], annotations_group[index]
)
return image_group, annotations_group
def random_transform_group_entry(self, image, annotations, transform=None):
"""
Randomly transforms image and annotation.
"""
# randomly transform both image and annotations
if transform is not None or self.transform_generator:
if transform is None:
transform = adjust_transform_for_image(next(self.transform_generator), image,
self.transform_parameters.relative_translation)
# apply transformation to image
image = apply_transform(transform, image, self.transform_parameters)
# Transform the bounding boxes in the annotations.
annotations['bboxes'] = annotations['bboxes'].copy()
for index in range(annotations['bboxes'].shape[0]):
annotations['bboxes'][index, :] = transform_aabb(transform, annotations['bboxes'][index, :])
return image, annotations
def random_transform_group(self, image_group, annotations_group):
"""
Randomly transforms each image and its annotations.
"""
assert (len(image_group) == len(annotations_group))
for index in range(len(image_group)):
# transform a single group entry
image_group[index], annotations_group[index] = self.random_transform_group_entry(image_group[index],
annotations_group[index])
return image_group, annotations_group
def random_misc_group_entry(self, image, annotations):
"""
Randomly transforms image and annotation.
"""
assert annotations['bboxes'].shape[0] != 0
# randomly transform both image and annotations
image, boxes = self.misc_effect(image, annotations['bboxes'])
# Transform the bounding boxes in the annotations.
annotations['bboxes'] = boxes
return image, annotations
def random_misc_group(self, image_group, annotations_group):
"""
Randomly transforms each image and its annotations.
"""
assert (len(image_group) == len(annotations_group))
if self.misc_effect is None:
return image_group, annotations_group
for index in range(len(image_group)):
# transform a single group entry
image_group[index], annotations_group[index] = self.random_misc_group_entry(image_group[index],
annotations_group[index])
return image_group, annotations_group
def preprocess_group_entry(self, image, annotations):
"""
Preprocess image and its annotations.
"""
# preprocess the image
image, scale, offset_h, offset_w = self.preprocess_image(image)
# apply resizing to annotations too
annotations['bboxes'] *= scale
annotations['bboxes'][:, [0, 2]] += offset_w
annotations['bboxes'][:, [1, 3]] += offset_h
# print(annotations['bboxes'][:, [2, 3]] - annotations['bboxes'][:, [0, 1]])
return image, annotations
def preprocess_group(self, image_group, annotations_group):
"""
Preprocess each image and its annotations in its group.
"""
assert (len(image_group) == len(annotations_group))
for index in range(len(image_group)):
# preprocess a single group entry
image_group[index], annotations_group[index] = self.preprocess_group_entry(image_group[index],
annotations_group[index])
return image_group, annotations_group
def group_images(self):
"""
Order the images according to self.order and makes groups of self.batch_size.
"""
# determine the order of the images
order = list(range(self.size()))
if self.group_method == 'random':
random.shuffle(order)
elif self.group_method == 'ratio':
order.sort(key=lambda x: self.image_aspect_ratio(x))
# divide into groups, one group = one batch
self.groups = [[order[x % len(order)] for x in range(i, i + self.batch_size)] for i in
range(0, len(order), self.batch_size)]
def compute_inputs(self, image_group, annotations_group):
"""
Compute inputs for the network using an image_group.
"""
# construct an image batch object
batch_images = np.zeros((len(image_group), self.input_size, self.input_size, 3), dtype=np.float32)
batch_hms = np.zeros((len(image_group), self.output_size, self.output_size, self.num_classes()),
dtype=np.float32)
batch_hms_2 = np.zeros((len(image_group), self.output_size, self.output_size, self.num_classes()),
dtype=np.float32)
batch_whs = np.zeros((len(image_group), self.max_objects, 2), dtype=np.float32)
batch_regs = np.zeros((len(image_group), self.max_objects, 2), dtype=np.float32)
batch_reg_masks = np.zeros((len(image_group), self.max_objects), dtype=np.float32)
batch_indices = np.zeros((len(image_group), self.max_objects), dtype=np.float32)
# copy all images to the upper left part of the image batch object
for b, (image, annotations) in enumerate(zip(image_group, annotations_group)):
c = np.array([image.shape[1] / 2., image.shape[0] / 2.], dtype=np.float32)
s = max(image.shape[0], image.shape[1]) * 1.0
trans_input = get_affine_transform(c, s, self.input_size)
# inputs
image = self.preprocess_image(image, c, s, tgt_w=self.input_size, tgt_h=self.input_size)
batch_images[b] = image
# outputs
bboxes = annotations['bboxes']
assert bboxes.shape[0] != 0
class_ids = annotations['labels']
assert class_ids.shape[0] != 0
trans_output = get_affine_transform(c, s, self.output_size)
for i in range(bboxes.shape[0]):
bbox = bboxes[i].copy()
cls_id = class_ids[i]
# (x1, y1)
bbox[:2] = affine_transform(bbox[:2], trans_output)
# (x2, y2)
bbox[2:] = affine_transform(bbox[2:], trans_output)
bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, self.output_size - 1)
bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, self.output_size - 1)
h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
if h > 0 and w > 0:
radius_h, radius_w = gaussian_radius((math.ceil(h), math.ceil(w)))
radius_h = max(0, int(radius_h))
radius_w = max(0, int(radius_w))
radius = gaussian_radius_2((math.ceil(h), math.ceil(w)))
radius = max(0, int(radius))
ct = np.array([(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2], dtype=np.float32)
ct_int = ct.astype(np.int32)
draw_gaussian(batch_hms[b, :, :, cls_id], ct_int, radius_h, radius_w)
draw_gaussian_2(batch_hms_2[b, :, :, cls_id], ct_int, radius)
batch_whs[b, i] = 1. * w, 1. * h
batch_indices[b, i] = ct_int[1] * self.output_size + ct_int[0]
batch_regs[b, i] = ct - ct_int
batch_reg_masks[b, i] = 1
# hm = batch_hms[b, :, :, cls_id]
# hm = np.round(hm * 255).astype(np.uint8)
# hm = cv2.cvtColor(hm, cv2.COLOR_GRAY2BGR)
# hm_2 = batch_hms_2[b, :, :, cls_id]
# hm_2 = np.round(hm_2 * 255).astype(np.uint8)
# hm_2 = cv2.cvtColor(hm_2, cv2.COLOR_GRAY2BGR)
# cv2.rectangle(hm, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (0, 255, 0), 1)
# cv2.rectangle(hm_2, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (0, 255, 0), 1)
# cv2.namedWindow('hm', cv2.WINDOW_NORMAL)
# cv2.imshow('hm', np.hstack([hm, hm_2]))
# cv2.waitKey()
# print(np.sum(batch_reg_masks[b]))
# for i in range(self.num_classes()):
# plt.subplot(4, 5, i + 1)
# hm = batch_hms[b, :, :, i]
# plt.imshow(hm, cmap='gray')
# plt.axis('off')
# plt.show()
# hm = np.sum(batch_hms[0], axis=-1)
# hm = np.round(hm * 255).astype(np.uint8)
# hm = cv2.cvtColor(hm, cv2.COLOR_GRAY2BGR)
# hm_2 = np.sum(batch_hms_2[0], axis=-1)
# hm_2 = np.round(hm_2 * 255).astype(np.uint8)
# hm_2 = cv2.cvtColor(hm_2, cv2.COLOR_GRAY2BGR)
# for i in range(bboxes.shape[0]):
# x1, y1 = np.round(affine_transform(bboxes[i, :2], trans_input)).astype(np.int32)
# x2, y2 = np.round(affine_transform(bboxes[i, 2:], trans_input)).astype(np.int32)
# x1_, y1_ = np.round(affine_transform(bboxes[i, :2], trans_output)).astype(np.int32)
# x2_, y2_ = np.round(affine_transform(bboxes[i, 2:], trans_output)).astype(np.int32)
# class_id = class_ids[i]
# cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 1)
        #     cv2.putText(image, str(class_id), (x1, y1), cv2.FONT_HERSHEY_SIMPLEX, 2.0, (0, 0, 0), 3)
# cv2.rectangle(hm, (x1_, y1_), (x2_, y2_), (0, 255, 0), 1)
# cv2.rectangle(hm_2, (x1_, y1_), (x2_, y2_), (0, 255, 0), 1)
# cv2.namedWindow('hm', cv2.WINDOW_NORMAL)
# cv2.imshow('hm', np.hstack([hm, hm_2]))
# cv2.namedWindow('image', cv2.WINDOW_NORMAL)
# cv2.imshow('image', image)
# cv2.waitKey()
return [batch_images, batch_hms_2, batch_whs, batch_regs, batch_reg_masks, batch_indices]
def compute_targets(self, image_group, annotations_group):
"""
Compute target outputs for the network using images and their annotations.
"""
return np.zeros((len(image_group),))
def compute_inputs_targets(self, group):
"""
Compute inputs and target outputs for the network.
"""
# load images and annotations
# list
image_group = self.load_image_group(group)
annotations_group = self.load_annotations_group(group)
# check validity of annotations
image_group, annotations_group = self.filter_annotations(image_group, annotations_group, group)
# randomly apply visual effect
image_group, annotations_group = self.random_visual_effect_group(image_group, annotations_group)
#
# # randomly transform data
# image_group, annotations_group = self.random_transform_group(image_group, annotations_group)
# randomly apply misc effect
image_group, annotations_group = self.random_misc_group(image_group, annotations_group)
#
# # perform preprocessing steps
# image_group, annotations_group = self.preprocess_group(image_group, annotations_group)
#
# # check validity of annotations
# image_group, annotations_group = self.clip_transformed_annotations(image_group, annotations_group, group)
if len(image_group) == 0:
return None, None
# compute network inputs
inputs = self.compute_inputs(image_group, annotations_group)
# compute network targets
targets = self.compute_targets(image_group, annotations_group)
return inputs, targets
def __len__(self):
"""
Number of batches for generator.
"""
return len(self.groups)
def __getitem__(self, index):
"""
Keras sequence method for generating batches.
"""
group = self.groups[self.current_index]
if self.multi_scale:
if self.current_index % 10 == 0:
random_size_index = np.random.randint(0, len(self.multi_image_sizes))
self.image_size = self.multi_image_sizes[random_size_index]
inputs, targets = self.compute_inputs_targets(group)
while inputs is None:
current_index = self.current_index + 1
if current_index >= len(self.groups):
current_index = current_index % (len(self.groups))
self.current_index = current_index
group = self.groups[self.current_index]
inputs, targets = self.compute_inputs_targets(group)
current_index = self.current_index + 1
if current_index >= len(self.groups):
current_index = current_index % (len(self.groups))
self.current_index = current_index
return inputs, targets
def preprocess_image(self, image, c, s, tgt_w, tgt_h):
trans_input = get_affine_transform(c, s, (tgt_w, tgt_h))
image = cv2.warpAffine(image, trans_input, (tgt_w, tgt_h), flags=cv2.INTER_LINEAR)
image = image.astype(np.float32)
image[..., 0] -= 103.939
image[..., 1] -= 116.779
image[..., 2] -= 123.68
return image
def get_transformed_group(self, group):
image_group = self.load_image_group(group)
annotations_group = self.load_annotations_group(group)
# check validity of annotations
image_group, annotations_group = self.filter_annotations(image_group, annotations_group, group)
# randomly transform data
image_group, annotations_group = self.random_transform_group(image_group, annotations_group)
return image_group, annotations_group
def get_cropped_and_rotated_group(self, group):
image_group = self.load_image_group(group)
annotations_group = self.load_annotations_group(group)
# check validity of annotations
image_group, annotations_group = self.filter_annotations(image_group, annotations_group, group)
# randomly transform data
image_group, annotations_group = self.random_crop_group(image_group, annotations_group)
image_group, annotations_group = self.random_rotate_group(image_group, annotations_group)
return image_group, annotations_group
| 43.223592 | 145 | 0.583479 | ["Apache-2.0"] | lbcsept/keras-CenterNet | generators/common.py | 24,551 | Python |
#https://leetcode.com/problems/reverse-linked-list/
# Iterative method
# Approach:
# Store the head in a variable called curr and initialise prev = None.
# In a normal traversal, curr follows the next pointers forward until it reaches None.
# To reverse the list, each node's next pointer must instead point to its previous node,
# and the original first node must point to None.
# So iterate over the list and, at every node, redirect curr.next to prev,
# then advance prev to the current node and curr to the saved next node.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
def reverseList(self, head):
curr = head
prev = None
while(curr != None):
next = curr.next
curr.next = prev
prev = curr
curr = next
return prev
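# Illustrative usage (editor addition): LeetCode normally supplies ListNode, so a
# minimal version is defined here only to make the sketch below runnable.
class ListNode:
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next
def _demo_reverse_list():
    # Build 1 -> 2 -> 3, reverse it, then collect the values of the reversed list.
    head = ListNode(1, ListNode(2, ListNode(3)))
    node = Solution().reverseList(head)
    values = []
    while node is not None:
        values.append(node.val)
        node = node.next
    print(values)  # [3, 2, 1]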
| 28.235294 | 121 | 0.638542 | ["MIT"] | 5l1v3r1/CompetitiveProgrammingQuestionBank | DSA 450 GFG/reverse_linked_list_iterative.py | 960 | Python |
# Generated by Django 2.2.8 on 2020-01-05 15:27
from django.db import migrations
import pyuploadcare.dj.models
class Migration(migrations.Migration):
dependencies = [
('awards', '0005_auto_20200104_1643'),
]
operations = [
migrations.AlterField(
model_name='projo_post',
name='landing_page_pic',
field=pyuploadcare.dj.models.ImageField(),
),
]
| 21.25 | 54 | 0.628235 | ["MIT"] | petermirithu/Grant_py | awards/migrations/0006_auto_20200105_1827.py | 425 | Python |
INFO = dict(
name='byecha',
description='Log dumper of Chatwork, inspired by goodbye_chatwork',
author='nothink',
author_email='nothing@yasagure.jp',
license='MIT License',
url='https://github.com/nothink/byecha',
classifiers=[
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Development Status :: 4 - Beta',
'Environment :: Console',
'License :: OSI Approved :: MIT License'
]
)
| 30.6875 | 71 | 0.602851 | ["MIT"] | nothink/ByeCha | info.py | 491 | Python |
import numpy as np
import math
def Sphere(ind):
sum = 0
for i in ind:
sum += i**2
return sum
def Rastrigin(ind):
sum = 10 * len(ind)
for i in ind:
sum += i**2 - 10 * np.cos(2*np.pi*i)
return sum
def Rosenbrock(ind):
sum = 0
for i in range(len(ind) - 1):
sum += 100 * (ind[i + 1] - ind[i]**2)**2 + (ind[i] - 1)**2
return sum
def Griewank(d):
sum_1 = 0
for i in d:
sum_1 += (i*i)/4000
sum_2 = 1
for i in range(len(d)):
sum_2 *= np.cos(d[i]/math.sqrt(i + 1))
return sum_1 - sum_2 + 1
def Ackley(d):
a = 20
b = 0.2
c = 2 * np.pi
sum1 = 0
sum2 = 0
for i in range(len(d)):
sum1 += d[i] ** 2
sum2 += np.cos(c * d[i])
term1 = -a * np.exp(-b * np.sqrt(sum1 / len(d)))
term2 = -np.exp(sum2 / len(d))
return term1 + term2 + a + np.exp(1)
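# Sanity check (editor addition): each benchmark above has a known global minimum of 0,
# at the origin for Sphere/Rastrigin/Griewank/Ackley and at (1, ..., 1) for Rosenbrock,
# so evaluating there should return (numerically) zero.
def _demo_global_minima():
    zeros = np.zeros(5)
    ones = np.ones(5)
    print(Sphere(zeros), Rastrigin(zeros), Griewank(zeros), Rosenbrock(ones))  # all 0.0
    print(np.isclose(Ackley(zeros), 0.0))  # True (up to floating-point error)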
| 20.604651 | 66 | 0.479684 | ["MIT"] | ngctnnnn/Simulation_experiments_for_optimizing_objective_function | test_function.py | 886 | Python |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/03_models.base_models.ipynb (unless otherwise specified).
__all__ = ['MLBEndpointBase', 'MLBEndpointReference', 'CustomInt', 'PositionBase', 'PlayerHandedness', 'PersonBase',
'MLBPerson', 'Coordinates', 'TimeZone', 'VenueLocation', 'TurfType', 'RoofType', 'FieldInfo']
# Internal Cell
from pydantic import (
BaseModel,
HttpUrl,
validator,
constr
)
from enum import Enum
from typing import Optional
# Cell
class MLBEndpointBase(BaseModel):
link: HttpUrl
@validator('link',pre=True)
def add_base_url_to_link(cls,link):
return 'https://statsapi.mlb.com'+link
class MLBEndpointReference(MLBEndpointBase):
id: int
name: Optional[str]=None
# Cell
class CustomInt(str):
@classmethod
def __get_validators__(cls):
# one or more validators may be yielded which will be called in the
# order to validate the input, each validator will receive as an input
# the value returned from the previous validator
yield cls.validate
@classmethod
def validate(cls, v):
if v == '-':
return None
else:
return int(v)
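# Illustrative sketch (editor addition, assuming pydantic v1, which the validator style
# above targets). The id, path, and name values are hypothetical examples: a relative
# `link` is expanded to a full statsapi.mlb.com URL, and CustomInt maps the placeholder
# '-' to None while converting other strings to int.
def _demo_base_models():
    ref = MLBEndpointReference(id=147, link='/api/v1/teams/147', name='Example Team')
    print(ref.link)  # https://statsapi.mlb.com/api/v1/teams/147
    class _Row(BaseModel):
        hits: CustomInt
    print(_Row(hits='12').hits, _Row(hits='-').hits)  # 12 None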
# Cell
class PositionBase(BaseModel):
code: str
name: str
type: str
abbreviation: str
class PlayerHandedness(BaseModel):
code: constr(max_length=1)
description: str
# Cell
class PersonBase(MLBEndpointReference):
class Config:
fields = {
'name':{
'alias':'fullName'
}
}
class MLBPerson(PersonBase):
firstName: str
lastName: str
height: str
weight: int
    active: bool
primaryPosition: PositionBase
useName: str
boxscoreName: str
gender: str
isPlayer: bool
isVerified: bool
nameSlug: str
# Cell
class Coordinates(BaseModel):
latitude: float
longitude: float
class TimeZone(BaseModel):
id: str
offset: int
tz: str
class VenueLocation(BaseModel):
city: str
state: str
stateAbbrev: str
defaultCoordinates: Coordinates
# Cell
class TurfType(str,Enum):
Grass = 'Grass'
Artificial = 'Artificial'
class RoofType(str,Enum):
Open = 'Open'
Dome = 'Dome'
Retractable = 'Retractable'
class FieldInfo(BaseModel):
capacity: int
turfType: TurfType
roofType: RoofType
leftLine: int
leftCenter: int
center: int
rightCenter: int
rightLine: int
# Internal Cell
class AbstractGameState(str,Enum):
Final = 'Final'
Live = 'Live'
Other = 'Other'
Preview = 'Preview'
class AbstractGameCode(str,Enum):
F='F'
L='L'
O='O'
P='P'
# Internal Cell
class GameType(str,Enum):
spring_training = 'S'
regular_season = 'R'
wild_card_game = 'F'
division_series = 'D'
league_championship_series = 'L'
world_series = 'W'
championship = 'C'
nineteenth_centure_series = 'N'
playoffs = 'P'
all_star_game = 'A'
intrasquad = 'I'
exhibition = 'E'
# Internal Cell
class GamedayType(str,Enum):
box_score_only = 'B'
pitch_by_pitch = 'D'
enhanced = 'E'
linescore_only = 'L'
play_by_play = 'N'
premium = 'P'
score_only = 'S'
    regular = 'Y'
| 18.959064 | 116 | 0.638186 | ["Apache-2.0"] | schlinkertc/MLB_DataDevTools | MLB_DataDevTools/models/base_models.py | 3,242 | Python |
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from networks.embedding import *
from networks.encoder import *
from networks.selector import *
from networks.classifier import *
from .Model import Model
class PCNN_ONE(Model):
def __init__(self, config):
super(PCNN_ONE, self).__init__(config)
self.encoder = PCNN(config)
# self.selector = One(config, config.hidden_size * 3)
self.selector = One(config, 3*config.sen_hidden_size)
| 29.263158 | 55 | 0.784173 | ["MIT"] | ningpang/com-CNN | PCNN+WA+PF/models/PCNN_ONE.py | 556 | Python |
import re
def validate_sheet_name(sheet_name):
if len(sheet_name) > 33:
return False
return not re.search("[%#&/\*\?\\\]", sheet_name)
def sanitise_sheet_name(sheet_name):
replaced_with_empty = ["%", "#", "&", "*", "?"]
replaced_with_hyphens = ["/", "\\"]
for ch in replaced_with_empty:
sheet_name = sheet_name.replace(ch, "")
for ch in replaced_with_hyphens:
sheet_name = sheet_name.replace(ch, "-")
return sheet_name[0:33]
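# Illustrative usage (editor addition, hypothetical sheet names): names containing
# %, #, &, * or ? have those characters stripped, slashes and backslashes become
# hyphens, and the result is truncated to the module's 33-character limit.
def _demo_sheet_names():
    print(validate_sheet_name('Q1 Report'))      # True
    print(validate_sheet_name('Sales/2020*Q1'))  # False (contains / and *)
    print(sanitise_sheet_name('Sales/2020*Q1'))  # Sales-2020Q1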
| 25.368421 | 53 | 0.618257 | ["MIT"] | simkimsia/ug-read-write-excel-using-python | examples/c07_2_rename_validate_sheet_name/custom/index.py | 482 | Python |
import unicodedb_shim as unicodedb
from data import Position
class CStream(object):
def __init__(self, source, index=0, col=0, lno=1):
self.col = col
self.index = index
self.lno = lno
self.source = source
def advance(self):
c = self.current
self.index += 1
self.col += 1
if c == u'\n':
self.lno += 1
self.col = 0
return c
@property
def current(self):
return self.source[self.index]
def pair_ahead(self, table):
if self.index + 1 < len(self.source):
return self.source[self.index:self.index+2] in table
return False
@property
def filled(self):
return self.index < len(self.source)
@property
def position(self):
return Position(self.col, self.lno)
def is_sym(self):
if self.filled:
ch = self.current
return unicodedb.isalpha(ord(ch)) or ch == '_'
return False
def is_digit(self):
if self.filled:
return self.current in u'0123456789'
return False
def is_hex(self):
if self.filled:
return self.current in u'0123456789abcdefABCDEF'
return False
def is_space(self):
if self.filled:
return unicodedb.isspace(ord(self.current))
return False
| 23.982456 | 64 | 0.567666 | ["MIT"] | cheery/lever | compiler/lever_parser/reader/stream.py | 1,367 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# stdlib imports
import os.path
import re
# third party imports
import numpy as np
import pandas as pd
import pkg_resources
# Local imports
from gmprocess.metrics.station_summary import StationSummary
from gmprocess.core.stationstream import StationStream
from gmprocess.core.stationtrace import StationTrace
def test_fas():
"""
Testing based upon the work provided in
https://github.com/arkottke/notebooks/blob/master/effective_amp_spectrum.ipynb
"""
ddir = os.path.join('data', 'testdata')
datadir = pkg_resources.resource_filename('gmprocess', ddir)
fas_file = os.path.join(datadir, 'fas_greater_of_two_horizontals.pkl')
p1 = os.path.join(datadir, 'peer', 'RSN763_LOMAP_GIL067.AT2')
p2 = os.path.join(datadir, 'peer', 'RSN763_LOMAP_GIL337.AT2')
stream = StationStream([])
for idx, fpath in enumerate([p1, p2]):
with open(fpath, encoding='utf-8') as file_obj:
for _ in range(3):
next(file_obj)
meta = re.findall(r'[.0-9]+', next(file_obj))
dt = float(meta[1])
accels = np.array(
[col for line in file_obj for col in line.split()])
trace = StationTrace(data=accels, header={
'channel': 'H' + str(idx),
'delta': dt,
'units': 'acc',
'standard': {
'corner_frequency': np.nan,
'station_name': '',
'source': 'json',
'instrument': '',
'instrument_period': np.nan,
'source_format': 'json',
'comments': '',
'structure_type': '',
'sensor_serial_number': '',
'source_file': '',
'process_level': 'raw counts',
'process_time': '',
'horizontal_orientation': np.nan,
'vertical_orientation': np.nan,
'units': 'acc',
'units_type': 'acc',
'instrument_sensitivity': np.nan,
'instrument_damping': np.nan
}
})
stream.append(trace)
for tr in stream:
response = {'input_units': 'counts', 'output_units': 'cm/s^2'}
tr.setProvenance('remove_response', response)
target_df = pd.read_pickle(fas_file)
ind_vals = target_df.index.values
per = np.unique([
float(i[0].split(')')[0].split('(')[1]) for i in ind_vals]
)
freqs = 1 / per
imts = ['fas' + str(p) for p in per]
summary = StationSummary.from_stream(
stream, ['greater_of_two_horizontals'], imts, bandwidth=30)
pgms = summary.pgms
# pgms.to_pickle(fas_file)
for idx, f in enumerate(freqs):
fstr = 'FAS(%.3f)' % (1 / f)
fval1 = pgms.loc[fstr, 'GREATER_OF_TWO_HORIZONTALS'].Result
fval2 = target_df.loc[fstr, 'GREATER_OF_TWO_HORIZONTALS'].Result
np.testing.assert_allclose(fval1, fval2, rtol=1e-5, atol=1e-5)
if __name__ == '__main__':
test_fas()
| 33.483516 | 82 | 0.575648 | ["Unlicense"] | jrekoske-usgs/groundmotion-processing | tests/gmprocess/metrics/imt/fas_greater_of_two_test.py | 3,047 | Python |
import sys
import flask
from flask import Response, request, jsonify, send_file
from . import tagger
app = flask.Flask(__name__)
app.config['JSON_SORT_KEYS'] = False
app.url_map.strict_slashes = False
ROOT = '/spacy-ner'
@app.route(ROOT, methods=['GET'])
def home():
resp = Response(
response='{"endpoints": ["GET /", "GET /mapping", "POST /fields"]}',
mimetype="application/json",
status=200
)
return resp
@app.route(ROOT + '/mapping', methods=['GET'])
def mapping():
return send_file('../../mapping.json', 'application/json')
@app.route(ROOT + '/fields', methods=['POST'])
def tag():
text = request.form['file']
short = ((text[:98] + '..') if len(text) > 100 else text).replace('\n', ' ').replace('\r', ' ')
print('create fields of [' + short + ']', file=sys.stderr)
entities = tagger.tag(text)
return jsonify({'contents': text, 'entity': entities})
app.run(host='0.0.0.0', port=8080, debug=False)
| 24.897436 | 99 | 0.623069 | ["Apache-2.0"] | BasLee/textrepo | elasticsearch/spacy-ner/py/app/router.py | 971 | Python |
import os
import nuke
import pyblish.api
import pype
class ExtractReviewData(pype.api.Extractor):
"""Extracts movie and thumbnail with baked in luts
must be run after extract_render_local.py
"""
order = pyblish.api.ExtractorOrder + 0.01
label = "Extract Review Data"
families = ["review"]
hosts = ["nuke"]
def process(self, instance):
# Store selection
selection = [i for i in nuke.allNodes() if i["selected"].getValue()]
# Deselect all nodes to prevent external connections
[i["selected"].setValue(False) for i in nuke.allNodes()]
self.log.debug("creating staging dir:")
self.staging_dir(instance)
self.log.debug("instance: {}".format(instance))
self.log.debug("instance.data[families]: {}".format(
instance.data["families"]))
if "still" not in instance.data["families"]:
self.render_review_representation(instance,
representation="mov")
self.render_review_representation(instance,
representation="jpeg")
else:
self.render_review_representation(instance, representation="jpeg")
# Restore selection
[i["selected"].setValue(False) for i in nuke.allNodes()]
[i["selected"].setValue(True) for i in selection]
def render_review_representation(self,
instance,
representation="mov"):
        assert instance.data['representations'][0]['files'], "Instance data files shouldn't be empty!"
temporary_nodes = []
stagingDir = instance.data[
'representations'][0]["stagingDir"].replace("\\", "/")
self.log.debug("StagingDir `{0}`...".format(stagingDir))
collection = instance.data.get("collection", None)
if collection:
# get path
fname = os.path.basename(collection.format(
"{head}{padding}{tail}"))
fhead = collection.format("{head}")
# get first and last frame
first_frame = min(collection.indexes)
last_frame = max(collection.indexes)
else:
fname = os.path.basename(instance.data.get("path", None))
fhead = os.path.splitext(fname)[0] + "."
first_frame = instance.data.get("frameStart", None)
last_frame = instance.data.get("frameEnd", None)
rnode = nuke.createNode("Read")
rnode["file"].setValue(
os.path.join(stagingDir, fname).replace("\\", "/"))
rnode["first"].setValue(first_frame)
rnode["origfirst"].setValue(first_frame)
rnode["last"].setValue(last_frame)
rnode["origlast"].setValue(last_frame)
temporary_nodes.append(rnode)
previous_node = rnode
# get input process and connect it to baking
ipn = self.get_view_process_node()
if ipn is not None:
ipn.setInput(0, previous_node)
previous_node = ipn
temporary_nodes.append(ipn)
reformat_node = nuke.createNode("Reformat")
ref_node = self.nodes.get("Reformat", None)
if ref_node:
for k, v in ref_node:
self.log.debug("k,v: {0}:{1}".format(k,v))
if isinstance(v, unicode):
v = str(v)
reformat_node[k].setValue(v)
reformat_node.setInput(0, previous_node)
previous_node = reformat_node
temporary_nodes.append(reformat_node)
dag_node = nuke.createNode("OCIODisplay")
dag_node.setInput(0, previous_node)
previous_node = dag_node
temporary_nodes.append(dag_node)
# create write node
write_node = nuke.createNode("Write")
if representation in "mov":
file = fhead + "baked.mov"
name = "baked"
path = os.path.join(stagingDir, file).replace("\\", "/")
self.log.debug("Path: {}".format(path))
instance.data["baked_colorspace_movie"] = path
write_node["file"].setValue(path)
write_node["file_type"].setValue("mov")
write_node["raw"].setValue(1)
write_node.setInput(0, previous_node)
temporary_nodes.append(write_node)
tags = ["review", "delete"]
elif representation in "jpeg":
file = fhead + "jpeg"
name = "thumbnail"
path = os.path.join(stagingDir, file).replace("\\", "/")
instance.data["thumbnail"] = path
write_node["file"].setValue(path)
write_node["file_type"].setValue("jpeg")
write_node["raw"].setValue(1)
write_node.setInput(0, previous_node)
temporary_nodes.append(write_node)
tags = ["thumbnail"]
# retime for
first_frame = int(last_frame) / 2
last_frame = int(last_frame) / 2
repre = {
'name': name,
'ext': representation,
'files': file,
"stagingDir": stagingDir,
"frameStart": first_frame,
"frameEnd": last_frame,
"anatomy_template": "render",
"tags": tags
}
instance.data["representations"].append(repre)
# Render frames
nuke.execute(write_node.name(), int(first_frame), int(last_frame))
self.log.debug("representations: {}".format(instance.data["representations"]))
# Clean up
for node in temporary_nodes:
nuke.delete(node)
def get_view_process_node(self):
# Select only the target node
if nuke.selectedNodes():
[n.setSelected(False) for n in nuke.selectedNodes()]
ipn_orig = None
for v in [n for n in nuke.allNodes()
if "Viewer" in n.Class()]:
ip = v['input_process'].getValue()
ipn = v['input_process_node'].getValue()
if "VIEWER_INPUT" not in ipn and ip:
ipn_orig = nuke.toNode(ipn)
ipn_orig.setSelected(True)
if ipn_orig:
nuke.nodeCopy('%clipboard%')
[n.setSelected(False) for n in nuke.selectedNodes()] # Deselect all
nuke.nodePaste('%clipboard%')
ipn = nuke.selectedNode()
return ipn
| 34.132979 | 101 | 0.56475 | ["MIT"] | tws0002/pype | pype/plugins/nuke/publish/extract_review_data.py | 6,417 | Python |
"""
TODO docs
High-level commands to execute tasks
"""
from watsoncloud.foo.project_settings import *
from watsoncloud.foo.audio_video import *
from watsoncloud.foo.watson import *
from multiprocessing import Process
from os.path import exists
def init_project(video_filename):
# creates a project directory and a copy of the file
# returns the project slug
project_slug = init_project_from_video_file(video_filename)
return project_slug
def split_audio(project_slug):
# extracts audio from existing video file
# creates segments
# yields filenames of segments
vid_path = full_video_path(project_slug)
audio_path = full_audio_path(project_slug)
extract_audio_file(vid_path, audio_path)
# segments the audio (via a generator)
segments = segment_audio_file(audio_path, audio_segments_dir(project_slug))
for seg_filename in segments:
yield seg_filename
def transcribe_audio(project_slug, creds, overwrite=False):
    # Send each audio segment in a project to the Watson Speech-to-Text API,
    # using multiprocessing to parallelize the API calls.
    #
    # Returns the filename of the transcript that was produced.
"""
project_slug: ./projects/audiostreams/filename.wav
"""
# audio_segments_fnames = audio_segments_filenames(project_slug)
watson_jobs = []
# for audio_fn in audio_segments_fnames:
audio_fn = project_slug + '.wav'
print("audio_filename:"+audio_fn)
time_slug = make_slug_from_path(audio_fn)
transcript_fn = join(transcripts_dir(project_slug), time_slug) + '.json'
print("transcript_fn"+transcript_fn)
if not exists(transcript_fn):
print("Sending to Watson API:\n\t", audio_fn)
job = Process(target=process_transcript_call,
args=(audio_fn, transcript_fn, creds))
job.start()
watson_jobs.append(job)
# Wait for all jobs to end
for job in watson_jobs:
job.join()
return transcript_fn
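# A rough usage sketch of the pipeline above; the credential values and the
# video filename are placeholders, not real project assets:
#
#   creds = {'username': 'WATSON_USERNAME', 'password': 'WATSON_PASSWORD'}
#   slug = init_project('interview.mp4')
#   transcript_fn = transcribe_audio(slug, creds)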
def compile_transcripts(project_slug):
pass
def supercut(project_slug, regex_pattern):
pass
def process_transcript_call(audio_filename, transcript_path, creds):
resp = speech_to_text_api_call(
audio_filename,
username=creds['username'],
password=creds['password'])
with open(transcript_path, 'w') as t:
t.write(resp.text)
print("Transcribed:\n\t", transcript_path)
| 28.831325 | 79 | 0.716674 | [
"MIT"
] | audip/youtubeseek | watsoncloud/foo/high.py | 2,393 | Python |
# coding=utf8
from __future__ import unicode_literals
import codecs
from pyecharts import (Bar, Scatter3D)
from pyecharts import Page
from pyecharts.conf import configure, online
from test.constants import RANGE_COLOR, CLOTHES
def create_three():
# bar
v1 = [5, 20, 36, 10, 75, 90]
v2 = [10, 25, 8, 60, 20, 80]
bar = Bar("柱状图数据堆叠示例")
bar.add("商家A", CLOTHES, v1, is_stack=True)
bar.add("商家B", CLOTHES, v2, is_stack=True)
# scatter3D
import random
data = [
[random.randint(0, 100),
random.randint(0, 100),
random.randint(0, 100)] for _ in range(80)
]
scatter3d = Scatter3D("3D 散点图示例", width=1200, height=600)
scatter3d.add("", data, is_visualmap=True, visual_range_color=RANGE_COLOR)
return Page.from_charts(bar, scatter3d)
def test_custom_templates():
configure(
jshost='https://chfw.github.io/jupyter-echarts/echarts',
force_js_embed=False
)
page = create_three()
# page.js_dependencies = ['echarts.min']
page.render(path='new_version_page.html')
with codecs.open('new_version_page.html', 'r', 'utf-8') as f:
actual_content = f.read()
assert "</html>" in actual_content
def test_custom_template_for_chart():
data = [{
'name': '衬衫',
'value': 5
}, {
'name': '羊毛衫',
'value': 20
}, {
'name': '雪纺衫',
'value': 36
}]
configure(echarts_template_dir='.')
online()
data1 = {'衬衫': '34', '羊毛衫': 45, '雪纺衫': 40}
names, values = Bar.cast(data)
names1, values1 = Bar.cast(data1)
bar = Bar("柱状图数据堆叠示例")
bar.add("商家A", names, values, is_stack=True)
bar.add("商家B", names1, values1, is_stack=True)
bar.render(path='new_version_bar.html')
with codecs.open('new_version_bar.html', 'r', 'utf-8') as f:
actual_content = f.read()
assert "</html>" in actual_content
| 26.541667 | 78 | 0.614861 | [
"MIT"
] | RuanJylf/Data_Visualization | pyecharts-0.3.1/test/test_custom_render.py | 2,005 | Python |
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2020 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
""" This module provides widgets for displaying plots of
scalar data assigned to one- and two-dimensional grids using
:mod:`matplotlib`. These widgets are not intended to be used directly.
"""
import numpy as np
from IPython.core.display import display, HTML
from matplotlib import animation, pyplot
from pymor.core.base import abstractmethod
from pymor.core.config import config
from pymor.discretizers.builtin.grids.constructions import flatten_grid
from pymor.discretizers.builtin.grids.oned import OnedGrid
from pymor.discretizers.builtin.grids.referenceelements import triangle, square
class MatplotlibAxesBase:
def __init__(self, figure, sync_timer, grid, U=None, vmin=None, vmax=None, codim=2, separate_axes=False, columns=2,
aspect_ratio=1):
# aspect_ratio is height/width
self.vmin = vmin
self.vmax = vmax
self.codim = codim
self.grid = grid
if separate_axes:
if len(U) == 1:
columns = 1 # otherwise we get a sep axes object with 0 data
rows = int(np.ceil(len(U) / columns))
self.ax = figure.subplots(rows, columns, squeeze=False).flatten()
else:
self.ax = (figure.gca(),)
for ax in self.ax:
ax.set_aspect(aspect_ratio)
self.figure = figure
self.codim = codim
self.grid = grid
self.separate_axes = separate_axes
self.count = len(U) if separate_axes or isinstance(U, tuple) else 1
self.aspect_ratio = aspect_ratio
self._plot_init()
# assignment delayed to ensure _plot_init works w/o data
self.U = U
# Rest is only needed with animation
if U is not None and not separate_axes and self.count == 1:
assert len(self.ax) == 1
delay_between_frames = 200 # ms
self.anim = animation.FuncAnimation(figure, self.animate,
frames=U, interval=delay_between_frames,
blit=True, event_source=sync_timer)
# generating the HTML instance outside this class causes the plot display to fail
self.html = HTML(self.anim.to_jshtml())
else:
self.set(self.U)
@abstractmethod
def _plot_init(self):
"""Setup MPL figure display with empty data."""
pass
@abstractmethod
def set(self, U):
"""Load new data into existing plot objects."""
pass
@abstractmethod
def animate(self, u):
"""Load new data into existing plot objects."""
pass
class MatplotlibPatchAxes(MatplotlibAxesBase):
def __init__(self, figure, grid, bounding_box=None, U=None, vmin=None, vmax=None, codim=2, columns=2,
colorbar=True, sync_timer=None):
assert grid.reference_element in (triangle, square)
assert grid.dim == 2
assert codim in (0, 2)
subentities, coordinates, entity_map = flatten_grid(grid)
self.subentities = subentities if grid.reference_element is triangle \
else np.vstack((subentities[:, 0:3], subentities[:, [2, 3, 0]]))
self.coordinates = coordinates
self.entity_map = entity_map
self.reference_element = grid.reference_element
self.colorbar = colorbar
self.animate = self.set
if bounding_box is None:
bounding_box = grid.bounding_box()
assert len(bounding_box) == 2 and all(len(b) == 2 for b in bounding_box)
aspect_ratio = (bounding_box[1][1] - bounding_box[0][1]) / (bounding_box[1][0] - bounding_box[0][0])
super().__init__(U=U, figure=figure, grid=grid, vmin=vmin, vmax=vmax, codim=codim, columns=columns,
sync_timer=sync_timer, aspect_ratio=aspect_ratio)
def _plot_init(self):
if self.codim == 2:
self.p = self.ax[0].tripcolor(self.coordinates[:, 0], self.coordinates[:, 1], self.subentities,
np.zeros(len(self.coordinates)),
vmin=self.vmin, vmax=self.vmax, shading='gouraud')
else:
self.p = self.ax[0].tripcolor(self.coordinates[:, 0], self.coordinates[:, 1], self.subentities,
facecolors=np.zeros(len(self.subentities)),
vmin=self.vmin, vmax=self.vmax, shading='flat')
if self.colorbar:
# thin plots look ugly with a huge colorbar on the right
if self.aspect_ratio < 0.75:
orientation = 'horizontal'
else:
orientation = 'vertical'
self.figure.colorbar(self.p, ax=self.ax[0], orientation=orientation)
def set(self, U, vmin=None, vmax=None):
self.vmin = self.vmin if vmin is None else vmin
self.vmax = self.vmax if vmax is None else vmax
if self.codim == 2:
self.p.set_array(U)
elif self.reference_element is triangle:
self.p.set_array(U)
else:
self.p.set_array(np.tile(U, 2))
self.p.set_clim(self.vmin, self.vmax)
return (self.p,)
class Matplotlib1DAxes(MatplotlibAxesBase):
def __init__(self, U, figure, grid, vmin=None, vmax=None, codim=1, separate_axes=False, sync_timer=None,
columns=2):
assert isinstance(grid, OnedGrid)
assert codim in (0, 1)
super().__init__(U=U, figure=figure, grid=grid, vmin=vmin, vmax=vmax, codim=codim, columns=columns,
sync_timer=sync_timer, separate_axes=separate_axes)
def _plot_init(self):
centers = self.grid.centers(1)
if self.grid.identify_left_right:
centers = np.concatenate((centers, [[self.grid.domain[1]]]), axis=0)
self.periodic = True
else:
self.periodic = False
if self.codim == 1:
xs = centers
else:
xs = np.repeat(centers, 2)[1:-1]
if self.separate_axes:
self.lines = [ax.plot(xs, np.zeros_like(xs))[0] for ax in self.ax]
else:
self.lines = [self.ax[0].plot(xs, np.zeros_like(xs))[0] for _ in range(self.count)]
pad = (self.vmax - self.vmin) * 0.1
for ax in self.ax:
ax.set_ylim(self.vmin - pad, self.vmax + pad)
def _set(self, u, i):
if self.codim == 1:
if self.periodic:
self.lines[i].set_ydata(np.concatenate((u, [self.U[0]])))
else:
self.lines[i].set_ydata(u)
else:
self.lines[i].set_ydata(np.repeat(u, 2))
def animate(self, u):
for i in range(len(self.ax)):
self._set(u, i)
return self.lines
def set(self, U, vmin=None, vmax=None):
self.vmin = self.vmin if vmin is None else vmin
self.vmax = self.vmax if vmax is None else vmax
if isinstance(U, tuple):
for i, u in enumerate(U):
self._set(u, i)
else:
for i, (u, _) in enumerate(zip(U, self.ax)):
self._set(u, i)
pad = (self.vmax - self.vmin) * 0.1
for ax in self.ax:
ax.set_ylim(self.vmin - pad, self.vmax + pad)
if config.HAVE_QT and config.HAVE_MATPLOTLIB:
from Qt.QtWidgets import QSizePolicy
import Qt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from pymor.discretizers.builtin.grids.oned import OnedGrid
# noinspection PyShadowingNames
class Matplotlib1DWidget(FigureCanvas):
def __init__(self, U, parent, grid, count, vmin=None, vmax=None, legend=None, codim=1,
separate_plots=False, dpi=100):
assert isinstance(grid, OnedGrid)
assert codim in (0, 1)
figure = Figure(dpi=dpi)
if not separate_plots:
axes = figure.gca()
self.codim = codim
lines = ()
centers = grid.centers(1)
if grid.identify_left_right:
centers = np.concatenate((centers, [[grid.domain[1]]]), axis=0)
self.periodic = True
else:
self.periodic = False
if codim == 1:
xs = centers
else:
xs = np.repeat(centers, 2)[1:-1]
for i in range(count):
if separate_plots:
figure.add_subplot(int(count / 2) + count % 2, 2, i + 1)
axes = figure.gca()
pad = (vmax[i] - vmin[i]) * 0.1
axes.set_ylim(vmin[i] - pad, vmax[i] + pad)
l, = axes.plot(xs, np.zeros_like(xs))
lines = lines + (l,)
if legend and separate_plots:
axes.legend([legend[i]])
if not separate_plots:
pad = (max(vmax) - min(vmin)) * 0.1
axes.set_ylim(min(vmin) - pad, max(vmax) + pad)
if legend:
axes.legend(legend)
self.lines = lines
super().__init__(figure)
self.setParent(parent)
self.setMinimumSize(300, 300)
self.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding))
def set(self, U, ind):
for l, u in zip(self.lines, U):
if self.codim == 1:
if self.periodic:
l.set_ydata(np.concatenate((u[ind], [u[ind][0]])))
else:
l.set_ydata(u[ind])
else:
l.set_ydata(np.repeat(u[ind], 2))
self.draw()
class MatplotlibPatchWidget(FigureCanvas):
def __init__(self, parent, grid, bounding_box=None, vmin=None, vmax=None, codim=2, dpi=100):
assert grid.reference_element in (triangle, square)
assert grid.dim == 2
assert codim in (0, 2)
self.figure = Figure(dpi=dpi)
super().__init__(self.figure)
self.setParent(parent)
self.setMinimumSize(300, 300)
self.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding))
self.patch_axes = MatplotlibPatchAxes(figure=self.figure, grid=grid, bounding_box=bounding_box,
vmin=vmin, vmax=vmax, codim=codim)
def set(self, U, vmin=None, vmax=None):
self.patch_axes.set(U, vmin, vmax)
self.draw()
else:
class Matplotlib1DWidget:
pass
class MatplotlibPatchWidget:
pass
| 38.052817 | 119 | 0.573332 | [
"Unlicense"
] | TreeerT/pymor | src/pymor/discretizers/builtin/gui/matplotlib.py | 10,807 | Python |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Fetches, cleans, outputs TMCFs and CSVs for all World Bank development
indicator codes provided in WorldBankIndicators.csv for all years and for
all countries provided in WorldBankCountries.csv. """
from absl import app
import pandas as pd
import itertools
import requests
import zipfile
import io
import re
# Remaps the columns provided by World Bank API.
WORLDBANK_COL_REMAP = {
'Country Name': 'CountryName',
'Country Code': 'CountryCode',
'Indicator Name': 'IndicatorName',
'Indicator Code': 'IndicatorCode'
}
TEMPLATE_TMCF = """
Node: E:WorldBank->E0
typeOf: dcs:StatVarObservation
variableMeasured: C:WorldBank->StatisticalVariable
observationDate: C:WorldBank->Year
observationPeriod: "P1Y"
observationAbout: C:WorldBank->ISO3166Alpha3
value: C:WorldBank->Value
"""
TEMPLATE_STAT_VAR = """
Node: dcid:WorldBank/{INDICATOR}
name: "{NAME}"
description: "{DESCRIPTION}"
typeOf: dcs:StatisticalVariable
populationType: dcs:{populationType}
statType: dcs:{statType}
measuredProperty: dcs:{measuredProperty}
measurementDenominator: dcs:{measurementDenominator}
{CONSTRAINTS}
"""
def read_worldbank(iso3166alpha3):
""" Fetches and tidies all ~1500 World Bank indicators
for a given ISO 3166 alpha 3 code.
For a particular alpha 3 code, this function fetches the entire ZIP
file for that particular country for all World Bank indicators in a
wide format where years are columns. The dataframe is changed into a
narrow format so that year becomes a single column with each row
representing a different year for a single indicator.
Args:
iso3166alpha3: ISO 3166 alpha 3 for a country, as a string.
Returns:
A tidied pandas dataframe with all indicator codes for a particular
country in the format of (country, indicator, year, value).
Notes:
Takes approximately 10 seconds to download and
tidy one country in a Jupyter notebook.
"""
country_zip = ("http://api.worldbank.org/v2/en/country/" + iso3166alpha3 +
"?downloadformat=csv")
r = requests.get(country_zip)
filebytes = io.BytesIO(r.content)
myzipfile = zipfile.ZipFile(filebytes)
# We need to select the data file which starts with "API",
# but does not have an otherwise regular filename structure.
file_to_open = None
for file in myzipfile.namelist():
if file.startswith("API"):
file_to_open = file
break
assert file_to_open is not None, \
"Failed to find data for" + iso3166alpha3
df = None
    # Captures any text contained in double quotation marks.
line_match = re.compile(r"\"([^\"]*)\"")
for line in myzipfile.open(file_to_open).readlines():
# Cells are contained in quotations and comma separated.
cols = line_match.findall(line.decode("utf-8"))
# CSVs include header informational lines which should be ignored.
if len(cols) > 2:
# Use first row as the header.
if df is None:
df = pd.DataFrame(columns=cols)
else:
df = df.append(pd.DataFrame([cols], columns=df.columns),
ignore_index=True)
df = df.rename(columns=WORLDBANK_COL_REMAP)
# Turn each year into its own row.
df = df.set_index(
['CountryName', 'CountryCode', 'IndicatorName', 'IndicatorCode'])
df = df.stack()
df.index = df.index.rename('year', level=4)
df.name = "Value"
df = df.reset_index()
# Convert to numeric and drop empty values.
df['Value'] = pd.to_numeric(df['Value'])
df = df.dropna()
return df
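# A quick sketch of how read_worldbank might be called ("USA" is just an
# illustrative ISO 3166 alpha 3 code):
#
#   df = read_worldbank("USA")
#   # df columns: CountryName, CountryCode, IndicatorName, IndicatorCode, year, Value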
def build_stat_vars_from_indicator_list(row):
""" Generates World Bank StatVar for a row in the indicators dataframe. """
def row_to_constraints(row):
""" Helper to generate list of constraints. """
constraints_text = ""
next_constraint = 1
while (f"p{next_constraint}" in row and
not pd.isna(row[f"p{next_constraint}"])):
variable = row[f'p{next_constraint}']
constraint = row[f'v{next_constraint}']
constraints_text += f"{variable}: dcs:{constraint}\n"
next_constraint += 1
return constraints_text
# yapf: disable
# Input all required statistical variable fields.
new_stat_var = (TEMPLATE_STAT_VAR
.replace("{INDICATOR}", row['IndicatorCode'].replace(".", "_"))
.replace("{NAME}", row['IndicatorName'])
.replace("{DESCRIPTION}", row['SourceNote'])
.replace("{measuredProperty}", row['measuredProp'])
.replace("{CONSTRAINTS}", row_to_constraints(row))
)
# yapf: enable
# Include or remove option fields.
for optional_col in ([
'populationType', 'statType', 'measurementDenominator'
]):
if not pd.isna(row[optional_col]):
new_stat_var = new_stat_var.replace(f"{{{optional_col}}}",
row[optional_col])
else:
new_stat_var = new_stat_var.replace(
f"{optional_col}: dcs:{{{optional_col}}}\n", "")
return new_stat_var
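# For illustration (hypothetical row values): a row with measuredProp="count",
# p1="age" and v1="Years15Onwards" would add the constraint line
# "age: dcs:Years15Onwards" to the StatVar node generated above.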
def group_stat_vars_by_observation_properties(indicator_codes):
""" Groups stat vars by their observation schemas.
Groups Stat Vars by their inclusion of StatVar Observation
properties like measurementMethod or Unit.
The current template MCF schema does not support optional values in the
CSV so we must place these stat vars into
different template MCFs and CSVs.
Args:
indicator_codes: List of World Bank indicator codes with
their Data Commons mappings, as a pandas dataframe.
Returns:
Array of tuples for each statistical variable grouping.
1) template MCF, as a string.
2) columns to include in exported csv, as a list of strings.
3) indicator codes in this grouping, as a list of strings.
"""
# All the statistical observation properties that we included.
properties_of_stat_var_observation = ([
'measurementMethod', 'scalingFactor', 'sourceScalingFactor', 'unit'
])
# List of tuples to return.
tmcfs_for_stat_vars = []
# Dataframe that tracks which values are null.
null_status = indicator_codes.notna()
# Iterates over all permutations of stat var properties being included.
for permutation in list(
itertools.product([False, True],
repeat=len(properties_of_stat_var_observation))):
codes_that_match = null_status.copy()
base_template_mcf = TEMPLATE_TMCF
cols_to_include_in_csv = ['IndicatorCode']
# Loop over each obs column and whether to include it.
for include_col, column in (zip(permutation,
properties_of_stat_var_observation)):
# Filter the dataframe by this observation.
codes_that_match = codes_that_match.query(
f"{column} == {include_col}")
# Include the column in TMCF and column list.
if include_col:
base_template_mcf += f"{column}: C:WorldBank->{column}\n"
cols_to_include_in_csv.append(f"{column}")
tmcfs_for_stat_vars.append(
(base_template_mcf, cols_to_include_in_csv,
list(
indicator_codes.loc[codes_that_match.index]['IndicatorCode'])))
return tmcfs_for_stat_vars
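# Each tuple returned above is (template MCF, CSV columns, indicator codes).
# As a sketch, the grouping that includes only "unit" would carry a template
# ending in "unit: C:WorldBank->unit" and the column list
# ['IndicatorCode', 'unit'].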
def download_indicator_data(worldbank_countries, indicator_codes):
""" Downloads World Bank country data for all countries and
indicators provided.
Retains only the unique indicator codes provided.
Args:
worldbank_countries: Dataframe with ISO 3166 alpha 3 code for each
country.
        indicator_codes: Dataframe with INDICATOR_CODES to include.
Returns:
worldbank_dataframe: A tidied pandas dataframe where each row has
the format (indicator code, ISO 3166 alpha 3, year, value)
for all countries and all indicators provided.
"""
worldbank_dataframe = pd.DataFrame()
indicators_to_keep = list(indicator_codes['IndicatorCode'].unique())
for index, country_code in enumerate(worldbank_countries['ISO3166Alpha3']):
print(f"Downloading {country_code}")
country_df = read_worldbank(country_code)
        # Remove unnecessary indicators.
country_df = country_df[country_df['IndicatorCode'].isin(
indicators_to_keep)]
# Map country codes to ISO.
country_df['ISO3166Alpha3'] = country_code
        # Append this country's rows to the main dataframe.
worldbank_dataframe = worldbank_dataframe.append(country_df)
# Map indicator codes to unique Statistical Variable.
worldbank_dataframe['StatisticalVariable'] = (
worldbank_dataframe['IndicatorCode'].apply(
lambda code: f"WorldBank/{code.replace('.', '_')}"))
return worldbank_dataframe.rename({'year': 'Year'}, axis=1)
def output_csv_and_tmcf_by_grouping(worldbank_dataframe, tmcfs_for_stat_vars,
indicator_codes):
""" Outputs TMCFs and CSVs for each grouping of stat vars.
Args:
worldbank_dataframe: Dataframe containing all indicators for all
countries.
tmcfs_for_stat_vars: Array of tuples of template MCF,
columns on stat var observations,
indicator codes for that template.
indicator_codes -> Dataframe with INDICATOR_CODES to include.
"""
# Only include a subset of columns in the final csv
output_csv = worldbank_dataframe[[
'StatisticalVariable', 'IndicatorCode', 'ISO3166Alpha3', 'Year', 'Value'
]]
# Output tmcf and csv for each unique World Bank grouping.
for index, enum in enumerate(tmcfs_for_stat_vars):
tmcf, stat_var_obs_cols, stat_vars_in_group = enum
if len(stat_vars_in_group) != 0:
with open(f"output/WorldBank_{index}.tmcf", 'w',
newline='') as f_out:
f_out.write(tmcf)
# Output only the indicator codes in that grouping.
matching_csv = output_csv[output_csv['IndicatorCode'].isin(
stat_vars_in_group)]
# Include the Stat Observation columns in the output CSV.
if len(stat_var_obs_cols) > 1:
matching_csv = pd.merge(matching_csv,
indicator_codes[stat_var_obs_cols],
on="IndicatorCode")
# Format to decimals.
matching_csv = matching_csv.round(10)
matching_csv.drop("IndicatorCode",
axis=1).to_csv(f"output/WorldBank_{index}.csv",
float_format='%.10f',
index=False)
def source_scaling_remap(row, scaling_factor_lookup, existing_stat_var_lookup):
""" Scales values by sourceScalingFactor and inputs exisiting stat vars.
First, this function converts all values to per capita. Some measures
in the World Bank dataset are per thousand or per hundred thousand, but
we need to scale these to the common denomination format. Secondly,
some statistical variables such as Count_Person_InLaborForce are not
World Bank specific and need to be replaced. Both of these are imputted
from the following two lists in args.
Args:
scaling_factor_lookup: A dictionary of a mapping between World Bank
indicator code to the respective numeric scaling factor.
existing_stat_var_lookup: A dictionary of a mapping between all
indicator to be replaced with the exisiting stat var to replace it.
"""
indicator_code = row['IndicatorCode']
if indicator_code in scaling_factor_lookup:
row['Value'] = (row['Value'] /
int(scaling_factor_lookup[indicator_code]))
if indicator_code in existing_stat_var_lookup:
row['StatisticalVariable'] = ("dcid:" +
existing_stat_var_lookup[indicator_code])
return row
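# A sketch with hypothetical lookup contents: given
# scaling_factor_lookup = {'SP.DYN.CBRT.IN': 1000}, a value reported per 1000
# people is divided by 1000 here, and any code present in
# existing_stat_var_lookup gets its StatisticalVariable rewritten to
# "dcid:<existing stat var>".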
def main(_):
# Load statistical variable configuration file.
indicator_codes = pd.read_csv("WorldBankIndicators.csv")
# Add source description to note.
def add_source_to_description(row):
if not pd.isna(row['Source']):
return row['SourceNote'] + " " + str(row['Source'])
else:
return row['SourceNote']
indicator_codes['SourceNote'] = indicator_codes.apply(
add_source_to_description, axis=1)
# Generate stat vars
with open("output/WorldBank_StatisticalVariables.mcf", "w+") as f_out:
# Generate StatVars for fields that don't exist. Some fields such as
# Count_Person_Unemployed are already statistical variables so we do
# not need to recreate them.
for _, row in indicator_codes[
indicator_codes['ExistingStatVar'].isna()].iterrows():
f_out.write(build_stat_vars_from_indicator_list(row))
# Create template MCFs for each grouping of stat vars.
tmcfs_for_stat_vars = (
group_stat_vars_by_observation_properties(indicator_codes))
# Download data for all countries.
worldbank_countries = pd.read_csv("WorldBankCountries.csv")
worldbank_dataframe = download_indicator_data(worldbank_countries,
indicator_codes)
# Remap columns to match expected format.
worldbank_dataframe['Value'] = pd.to_numeric(worldbank_dataframe['Value'])
worldbank_dataframe['ISO3166Alpha3'] = (
worldbank_dataframe['ISO3166Alpha3'].apply(
lambda code: "dcs:country/" + code))
worldbank_dataframe['StatisticalVariable'] = \
worldbank_dataframe['StatisticalVariable'].apply(
lambda code: "dcs:" + code)
    # Scale values by scaling factor and replace existing StatVars.
scaling_factor_lookup = (indicator_codes.set_index("IndicatorCode")
['sourceScalingFactor'].dropna().to_dict())
existing_stat_var_lookup = (indicator_codes.set_index("IndicatorCode")
['ExistingStatVar'].dropna().to_dict())
worldbank_dataframe = worldbank_dataframe.apply(
lambda row: source_scaling_remap(row, scaling_factor_lookup,
existing_stat_var_lookup),
axis=1)
# Convert integer columns.
int_cols = (list(indicator_codes[indicator_codes['ConvertToInt'] == True]
['IndicatorCode'].unique()))
worldbank_subset = worldbank_dataframe[
worldbank_dataframe['IndicatorCode'].isin(int_cols)].index
worldbank_dataframe.loc[worldbank_subset, "Value"] = (pd.to_numeric(
worldbank_dataframe.loc[worldbank_subset, "Value"], downcast="integer"))
# Output final CSVs and variables.
output_csv_and_tmcf_by_grouping(worldbank_dataframe, tmcfs_for_stat_vars,
indicator_codes)
if __name__ == '__main__':
app.run(main)
| 40.308861 | 80 | 0.651803 | [
"Apache-2.0"
] | IanCostello/data | scripts/world_bank/worldbank.py | 15,922 | Python |
#coding=utf-8
#
# Created on Apr 23, 2014, by Junn
#
#
import json
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from rest_framework.response import Response as RfResponse
from core import codes
import urllib
import httplib
import requests
from django.core.files.uploadedfile import SimpleUploadedFile
def request_file(url):
    '''Download a file from a remote URL and build an "uploaded file" object
    like those found in request.FILES.
    @param url: the file URL, e.g. http://abc.im/12345.jpg
    @return: a SimpleUploadedFile object, the type contained in request.FILES (a dictionary-like object)
'''
if not url:
return
response = requests.get(url)
return SimpleUploadedFile('file', response.content)
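# Minimal usage sketch (the URL is the placeholder from the docstring above):
#   f = request_file('http://abc.im/12345.jpg')
#   # f can then be treated like an entry of request.FILES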
def send_request(host, send_url, method='GET', port=80, params={}, timeout=30,
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}):
    '''Send an HTTP request and return the response body as a string.
    @param: sample parameter values look like the following:
params = {'token': 'dF0zeqAPWs'}
headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
host = 'fir.im'
port = 80
method = 'GET'
send_url = '/api/v2/app/version/541a7131f?token=dF0zeqBMXAP'
'''
encoded_params = urllib.urlencode(params)
conn = httplib.HTTPConnection(host, port=port, timeout=timeout)
conn.request(method, send_url, encoded_params, headers)
response = conn.getresponse()
response_str = response.read()
conn.close()
return response_str
class JResponse(HttpResponse):
'''for simple dict response, like success and failed, etc'''
def __init__(self, result, status=200, *args, **kwargs):
if not isinstance(result, list):
if 'errors' in result.keys():
dt = result.pop('errors', {}) or {}
result['msg'] = ''.join([e[0] for e in dt.values()])
super(JResponse, self).__init__(
json.dumps(result), status=status, mimetype='application/json', *args, **kwargs
)
def standard_response(template, req, context):
    '''Render and return an HTTP web response.'''
return render_to_response(template, RequestContext(req, context))
class Response(RfResponse):
'''for object json response'''
def __init__(self, data, *args, **kwargs):
        if isinstance(data, dict) and 'code' in data.keys():  # data already carries a code, so don't add another
super(Response, self).__init__(data, *args, **kwargs)
else:
super(Response, self).__init__(codes.append('ok', {'data': data}), *args, **kwargs)
## Note: these must be declared as functions, not constants; a constant would be evaluated only once, at module import time.
def ok(data={}):
    '''data is a dict'''
return JResponse(codes.append('ok', data)) if data else resp('ok')
def failed(msg=''):
return resp('failed', msg)
def object_not_found():
return resp('object_not_found')
def http404():
return resp('not_found')
def resp(crr, msg=''):
    '''Return the canned response for an error-code constant. msg may fill
    placeholders in the constant's message string.
    params:
        @crr: the error-code identifier
'''
return JResponse(codes.fmat(crr, msg))
| 30.745283 | 107 | 0.6278 | [
"MIT"
] | dlooto/driver-vision | apps/utils/http.py | 3,479 | Python |
#coding:utf-8
"""
@author : linkin
@email : yooleak@outlook.com
@date : 2018-11-07
"""
import asyncio
import datetime
async def send_async_http(session,method,url,*,
retries=1,
interval=1,
wait_factor=2,
timeout=30,
success_callback=None,
fail_callback=None,
**kwargs) -> dict:
"""
    Send an asynchronous HTTP request to a given url, retrying on failure.
    After every failure the coroutine waits for a while before retrying;
    the wait time is controlled by interval and wait_factor.
    :param session: the asynchronous session used to send the request
    :param method: request method
    :param url: request url
    :param retries: number of retries after failure
    :param interval: delay before the next attempt after a failure
    :param wait_factor: the delay is multiplied by this factor after every failure,
        lengthening the wait; usually 1 < wf < 2, i.e. the delay is at most about 2**retries seconds
    :param timeout: connection timeout
    :param success_callback: callback invoked on success
    :param fail_callback: callback invoked on failure
    :param kwargs: other keyword arguments
    :return: a result dict
"""
exception = None
ret = {'cost':None,'code':0,'exception':exception,'tries':-1}
wait_interval = interval
if method.lower() not in ['get', 'head', 'post']:
return ret
    if retries == -1:  # -1 means retry forever on failure
attempt = -1
    elif retries == 0:  # 0 means do not retry on failure
attempt = 1
else:
attempt = retries + 1
while attempt != 0:
try:
start = datetime.datetime.now()
async with getattr(session,method)(url,timeout=timeout,**kwargs) as response:
end = datetime.datetime.now()
t = (end - start).total_seconds()
code = response.status
ret = {'cost': t, 'code': code, 'tries': retries - attempt+1}
if success_callback:
success_callback(ret)
return ret
except Exception as e:
ret['exception'] = e
ret['tries'] += 1
await asyncio.sleep(wait_interval)
wait_interval = wait_interval * wait_factor
attempt-=1
if fail_callback:
fail_callback(ret)
return ret | 32.203125 | 89 | 0.54197 | [
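# A minimal usage sketch. It assumes an aiohttp-style ClientSession, which is
# what the getattr(session, method)(...) call above expects; the URL is a
# placeholder.
#
#   import aiohttp
#
#   async def demo():
#       async with aiohttp.ClientSession() as session:
#           ret = await send_async_http(session, 'get', 'http://example.com',
#                                       retries=2, interval=1)
#           print(ret['code'], ret['cost'], ret['tries'])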
"MIT"
] | 01ly/FooProxy | tools/async_tools.py | 2,375 | Python |
from django.test import TestCase, Client
from assertpy import assert_that
from rest_framework import status
from rest_framework.test import APIClient
from .factories import RecipeFactory, IngredientFactory
from ..models import Recipe
from ..serializers import RecipeSerializer
import factory
LIST_SIZE = 3
class RecipeUpdateTestCase(TestCase):
@classmethod
def setUpTestData(cls) -> None:
cls.api_client = APIClient()
cls.client = APIClient()
def test_client_is_not_api_client(self) -> None:
        # It seems that after this is called, Django magic happens and the Django test client is injected;
        # that is, cls.client = APIClient() does not work as expected
assert_that(self.client).is_instance_of(Client)
assert_that(self.api_client).is_instance_of(APIClient)
def test_should_update_recipe_without_ingredients(self) -> None:
recipe = RecipeFactory(ingredients=None)
data = RecipeSerializer(recipe).data
response = self.api_client.put(f'/recipes/{recipe.id}', data, format='json')
db_recipe = Recipe.objects.get(id=recipe.id)
db_data = RecipeSerializer(db_recipe).data
assert_that(response.status_code).is_equal_to(status.HTTP_200_OK)
assert_that(db_data).is_equal_to(data)
def test_should_update_recipe_with_ingredients(self) -> None:
recipe = RecipeFactory(
ingredients=factory.RelatedFactoryList(IngredientFactory, factory_related_name='recipe', size=LIST_SIZE)
)
data = RecipeSerializer(recipe).data
response = self.api_client.put(f'/recipes/{recipe.id}', data, format='json')
db_recipe = Recipe.objects.get(id=recipe.id)
db_data = RecipeSerializer(db_recipe).data
assert_that(response.status_code).is_equal_to(status.HTTP_200_OK)
assert_that(db_data).is_equal_to(data)
def test_should_return_404(self) -> None:
response = self.api_client.put(f'/recipes/1000', {}, format='json')
assert_that(response.status_code).is_equal_to(status.HTTP_404_NOT_FOUND)
def test_should_return_400_when_missing_required_field(self) -> None:
recipe = RecipeFactory(ingredients=None)
data = RecipeSerializer(recipe).data
del data['name']
response = self.api_client.put(f'/recipes/{recipe.id}', data, format='json')
assert_that(response.status_code).is_equal_to(status.HTTP_400_BAD_REQUEST)
def test_should_return_400_when_sent_extra_fields(self) -> None:
recipe = RecipeFactory(ingredients=None)
data = RecipeSerializer(recipe).data
data['foo'] = 'bar'
response = self.api_client.put(f'/recipes/{recipe.id}', data, format='json')
assert_that(response.status_code).is_equal_to(status.HTTP_400_BAD_REQUEST)
| 41.161765 | 116 | 0.720972 | [
"MIT"
] | djensen47/recipe-api-django | src/recipes/tests/test_recipe_update.py | 2,799 | Python |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .. import client_query_batch
def test_client_query_batch(capsys, client):
job = client_query_batch.client_query_batch(client)
out, err = capsys.readouterr()
assert "Job {} is currently in state {}".format(job.job_id, job.state) in out
| 36.130435 | 81 | 0.754513 | [
"Apache-2.0"
] | curlup/google-cloud-python | bigquery/samples/tests/test_client_query_batch.py | 831 | Python |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.BatchMatMul."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class BatchMatmulOpTest(test.TestCase):
# Uses numpy to compute batch_matmul(x, y, adjoint_a, adjoint_b).
def _npBatchMatmul(self, x, y, adjoint_a, adjoint_b):
# output's shape depends on adj[0] and adj[1]
d0 = x.shape[-2] if not adjoint_a else x.shape[-1]
d2 = y.shape[-1] if not adjoint_b else y.shape[-2]
batch_dims = x.shape[:-2]
num = np.prod(batch_dims)
z = np.empty(list(batch_dims) + [d0, d2], dtype=x.dtype)
xr = x.reshape([num, x.shape[-2], x.shape[-1]])
yr = y.reshape([num, y.shape[-2], y.shape[-1]])
zr = z.reshape([num, z.shape[-2], z.shape[-1]])
for i in range(num):
a = np.matrix(xr[i, :, :])
if adjoint_a:
a = a.transpose().conj()
b = np.matrix(yr[i, :, :])
if adjoint_b:
b = b.transpose().conj()
zr[i, :, :] = a * b
return z
# Test _npBatchMatMul works.
def testNpVersion(self):
x = np.array([0., 1., 2., 3.]).reshape([1, 2, 2])
y = np.array([1., 2., 3., 4.]).reshape([1, 2, 2])
z0 = self._npBatchMatmul(x, y, False, False)
z1 = np.array([3., 4., 11., 16.]).reshape([1, 2, 2])
self.assertTrue(np.array_equal(z0, z1))
x = np.array([1., (1j), (-1.), (-1j)]).reshape([1, 2, 2])
    y = x * np.complex(1, 1)  # rotate x by 90 degrees
z0 = self._npBatchMatmul(x, y, False, False)
z1 = np.array([2., (2.j), -2., (-2.j)]).reshape([1, 2, 2])
self.assertTrue(np.array_equal(z0, z1))
z0 = self._npBatchMatmul(x, y, False, True)
z1 = np.array([(2. - 2.j), (-2. + 2.j), (-2. + 2.j), (2. - 2.j)]).reshape(
[1, 2, 2])
self.assertTrue(np.array_equal(z0, z1))
z0 = self._npBatchMatmul(x, y, True, False)
z1 = np.array([(2. + 2.j), (-2. + 2.j), (2. - 2.j), (2. + 2.j)]).reshape(
[1, 2, 2])
self.assertTrue(np.array_equal(z0, z1))
# Compares _tfpBatchMatmul(x, y, alpha, adj) and _npBatchMatMul(x, y, alpha,
# adj)
def _compare(self, x_in, y_in, adjoint_a, adjoint_b, static_shape=True):
x_t_shape = x_in.shape[:-2] + (x_in.shape[-1], x_in.shape[-2])
y_t_shape = y_in.shape[:-2] + (y_in.shape[-1], y_in.shape[-2])
x = x_in if not adjoint_a else x_in.reshape(x_t_shape)
y = y_in if not adjoint_b else y_in.reshape(y_t_shape)
is_floating = x.dtype != np.int32
tol = 100 * np.finfo(x.dtype).eps if is_floating else 0
with self.test_session(use_gpu=is_floating) as sess:
if static_shape:
z0 = math_ops.matmul(x, y, adjoint_a=adjoint_a, adjoint_b=adjoint_b)
z0_val = z0.eval()
else:
x_ph = array_ops.placeholder(x.dtype)
y_ph = array_ops.placeholder(y.dtype)
z0 = math_ops.matmul(
x_ph, y_ph, adjoint_a=adjoint_a, adjoint_b=adjoint_b)
z0_val = sess.run(z0, feed_dict={x_ph: x, y_ph: y})
z1 = self._npBatchMatmul(x, y, adjoint_a, adjoint_b)
self.assertAllClose(z0_val, z1, rtol=tol, atol=tol)
def _rand(self, shape, dtype):
vals = np.array(np.random.normal(-10, 10, np.prod(shape)), dtype=dtype)
if dtype in (np.complex64, np.complex128):
imag = np.array(np.random.normal(-10, 10, np.prod(shape)), dtype=dtype)
vals += 1j * imag
return vals.reshape(shape)
def _testNonEmpty(self, dtype, adjoint_a, adjoint_b, use_static_shape):
def compareNonEmpty(self, a_shape, b_shape):
self._compare(
self._rand(a_shape, dtype),
self._rand(b_shape, dtype), adjoint_a, adjoint_b, use_static_shape)
compareNonEmpty(self, [1, 2, 3], [1, 3, 5])
compareNonEmpty(self, [1, 2, 3], [1, 3, 1])
compareNonEmpty(self, [1, 2, 3], [1, 3, 5])
compareNonEmpty(self, [7, 1, 3], [7, 3, 5])
compareNonEmpty(self, [7, 2, 3], [7, 3, 1])
compareNonEmpty(self, [7, 2, 3], [7, 3, 5])
compareNonEmpty(self, [10, 64, 75], [10, 75, 30])
compareNonEmpty(self, [5, 7, 2, 3], [5, 7, 3, 5])
def _testEmpty(self, dtype, adjoint_a, adjoint_b, use_static_shape):
def compareEmpty(self, a_shape, b_shape):
self._compare(
np.zeros(a_shape).astype(dtype),
np.zeros(b_shape).astype(dtype), adjoint_a, adjoint_b,
use_static_shape)
compareEmpty(self, [0, 3, 2], [0, 2, 4])
compareEmpty(self, [3, 0, 2], [3, 2, 5])
compareEmpty(self, [3, 3, 2], [3, 2, 0])
def _GetBatchMatmulOpTest(dtype, adjoint_a, adjoint_b, use_static_shape):
def Test(self):
np.random.seed(42)
self._testNonEmpty(dtype, adjoint_a, adjoint_b, use_static_shape)
self._testEmpty(dtype, adjoint_a, adjoint_b, use_static_shape)
return Test
class BatchMatmulGradientTest(test.TestCase):
# loss = sum(batch_matmul(x, y)). Verify dl/dx and dl/dy via the
# gradient checker.
def _checkGrad(self, x_in, y_in, adjoint_a, adjoint_b):
x_t_shape = x_in.shape[:-2] + (x_in.shape[-1], x_in.shape[-2])
y_t_shape = y_in.shape[:-2] + (y_in.shape[-1], y_in.shape[-2])
x = x_in if not adjoint_a else x_in.reshape(x_t_shape)
y = y_in if not adjoint_b else y_in.reshape(y_t_shape)
epsilon = np.finfo(x.dtype).eps
delta = epsilon**(1.0 / 3.0)
with self.test_session(use_gpu=True):
inx = constant_op.constant(x)
iny = constant_op.constant(y)
z = math_ops.matmul(inx, iny, adjoint_a, adjoint_b)
loss = math_ops.reduce_sum(z)
((x_jacob_t, x_jacob_n),
(y_jacob_t, y_jacob_n)) = gradient_checker.compute_gradient(
[inx, iny], [x.shape, y.shape],
loss, [1],
x_init_value=[x, y],
delta=delta)
tol = 20 * delta
self.assertAllClose(x_jacob_t, x_jacob_n, rtol=tol, atol=tol)
self.assertAllClose(y_jacob_t, y_jacob_n, rtol=tol, atol=tol)
# Tests a batched matmul of x, and y: x is a 3D tensor of shape [b,
# n, k] y is a 3D tensor of shape [b, k, m] the batched matmul
# computes z of shape [b, n, m], where z[i, :, :] = x[i, :, :]
# matmul y[i, :, :]
def _compare(self, b, n, k, m, dtype, adjoint_a, adjoint_b):
np.random.seed(42)
x = np.random.normal(0, 1, b * n * k).astype(dtype).reshape([b, n, k])
if dtype in (np.complex64, np.complex128):
x.imag = np.random.normal(0, 1,
b * n * k).astype(dtype).reshape([b, n, k])
y = np.random.normal(0, 1, b * k * m).astype(dtype).reshape([b, k, m])
if dtype in (np.complex64, np.complex128):
y.imag = np.random.normal(0, 1,
b * k * m).astype(dtype).reshape([b, k, m])
self._checkGrad(x, y, adjoint_a, adjoint_b)
def _GetBatchMatmulGradientTest(dtype, adjoint_a, adjoint_b):
def Test(self):
self._compare(1, 2, 3, 5, dtype, adjoint_a, adjoint_b)
self._compare(3, 4, 7, 10, dtype, adjoint_a, adjoint_b)
return Test
if __name__ == "__main__":
for dtype_ in [
np.float16, np.float32, np.float64, np.complex64, np.complex128, np.int32
]:
for adjoint_a_ in False, True:
for adjoint_b_ in False, True:
name = "%s_%s_%s" % (dtype_.__name__, adjoint_a_, adjoint_b_)
for use_static_shape in True, False:
setattr(BatchMatmulOpTest,
"testBatchMatmulOp_" + name + ("_%s" % use_static_shape),
_GetBatchMatmulOpTest(dtype_, adjoint_a_, adjoint_b_,
use_static_shape))
if dtype_ is not np.int32:
setattr(BatchMatmulGradientTest, "testBatchMatmulGradient_" + name,
_GetBatchMatmulGradientTest(dtype_, adjoint_a_, adjoint_b_))
test.main()
| 40.065728 | 80 | 0.62374 | [
"Apache-2.0"
] | 1559603450/tensorflow | tensorflow/python/kernel_tests/batch_matmul_op_test.py | 8,534 | Python |
"""Here we define the exported functions, types, etc... which need to be
exported through a global C pointer.
Each dictionary contains name -> index pair.
Whenever you change one index, you break the ABI (and the ABI version number
should be incremented). Whenever you add an item to one of the dict, the API
needs to be updated.
When adding a function, make sure to use the next integer not used as an index
(in case you use an existing index or jump, the build will stop and raise an
exception, so it should hopefully not get unnoticed).
"""
from __future__ import division, absolute_import, print_function
multiarray_global_vars = {
'NPY_NUMUSERTYPES': 7,
'NPY_DEFAULT_ASSIGN_CASTING': 292,
}
multiarray_global_vars_types = {
'NPY_NUMUSERTYPES': 'int',
'NPY_DEFAULT_ASSIGN_CASTING': 'NPY_CASTING',
}
multiarray_scalar_bool_values = {
'_PyArrayScalar_BoolValues': 9
}
multiarray_types_api = {
'PyBigArray_Type': 1,
'PyArray_Type': 2,
'PyArrayDescr_Type': 3,
'PyArrayFlags_Type': 4,
'PyArrayIter_Type': 5,
'PyArrayMultiIter_Type': 6,
'PyBoolArrType_Type': 8,
'PyGenericArrType_Type': 10,
'PyNumberArrType_Type': 11,
'PyIntegerArrType_Type': 12,
'PySignedIntegerArrType_Type': 13,
'PyUnsignedIntegerArrType_Type': 14,
'PyInexactArrType_Type': 15,
'PyFloatingArrType_Type': 16,
'PyComplexFloatingArrType_Type': 17,
'PyFlexibleArrType_Type': 18,
'PyCharacterArrType_Type': 19,
'PyByteArrType_Type': 20,
'PyShortArrType_Type': 21,
'PyIntArrType_Type': 22,
'PyLongArrType_Type': 23,
'PyLongLongArrType_Type': 24,
'PyUByteArrType_Type': 25,
'PyUShortArrType_Type': 26,
'PyUIntArrType_Type': 27,
'PyULongArrType_Type': 28,
'PyULongLongArrType_Type': 29,
'PyFloatArrType_Type': 30,
'PyDoubleArrType_Type': 31,
'PyLongDoubleArrType_Type': 32,
'PyCFloatArrType_Type': 33,
'PyCDoubleArrType_Type': 34,
'PyCLongDoubleArrType_Type': 35,
'PyObjectArrType_Type': 36,
'PyStringArrType_Type': 37,
'PyUnicodeArrType_Type': 38,
'PyVoidArrType_Type': 39,
# End 1.5 API
'PyTimeIntegerArrType_Type': 214,
'PyDatetimeArrType_Type': 215,
'PyTimedeltaArrType_Type': 216,
'PyHalfArrType_Type': 217,
'NpyIter_Type': 218,
# End 1.6 API
}
#define NPY_NUMUSERTYPES (*(int *)PyArray_API[6])
#define PyBoolArrType_Type (*(PyTypeObject *)PyArray_API[7])
#define _PyArrayScalar_BoolValues ((PyBoolScalarObject *)PyArray_API[8])
multiarray_funcs_api = {
'PyArray_GetNDArrayCVersion': 0,
'PyArray_SetNumericOps': 40,
'PyArray_GetNumericOps': 41,
'PyArray_INCREF': 42,
'PyArray_XDECREF': 43,
'PyArray_SetStringFunction': 44,
'PyArray_DescrFromType': 45,
'PyArray_TypeObjectFromType': 46,
'PyArray_Zero': 47,
'PyArray_One': 48,
'PyArray_CastToType': 49,
'PyArray_CastTo': 50,
'PyArray_CastAnyTo': 51,
'PyArray_CanCastSafely': 52,
'PyArray_CanCastTo': 53,
'PyArray_ObjectType': 54,
'PyArray_DescrFromObject': 55,
'PyArray_ConvertToCommonType': 56,
'PyArray_DescrFromScalar': 57,
'PyArray_DescrFromTypeObject': 58,
'PyArray_Size': 59,
'PyArray_Scalar': 60,
'PyArray_FromScalar': 61,
'PyArray_ScalarAsCtype': 62,
'PyArray_CastScalarToCtype': 63,
'PyArray_CastScalarDirect': 64,
'PyArray_ScalarFromObject': 65,
'PyArray_GetCastFunc': 66,
'PyArray_FromDims': 67,
'PyArray_FromDimsAndDataAndDescr': 68,
'PyArray_FromAny': 69,
'PyArray_EnsureArray': 70,
'PyArray_EnsureAnyArray': 71,
'PyArray_FromFile': 72,
'PyArray_FromString': 73,
'PyArray_FromBuffer': 74,
'PyArray_FromIter': 75,
'PyArray_Return': 76,
'PyArray_GetField': 77,
'PyArray_SetField': 78,
'PyArray_Byteswap': 79,
'PyArray_Resize': 80,
'PyArray_MoveInto': 81,
'PyArray_CopyInto': 82,
'PyArray_CopyAnyInto': 83,
'PyArray_CopyObject': 84,
'PyArray_NewCopy': 85,
'PyArray_ToList': 86,
'PyArray_ToString': 87,
'PyArray_ToFile': 88,
'PyArray_Dump': 89,
'PyArray_Dumps': 90,
'PyArray_ValidType': 91,
'PyArray_UpdateFlags': 92,
'PyArray_New': 93,
'PyArray_NewFromDescr': 94,
'PyArray_DescrNew': 95,
'PyArray_DescrNewFromType': 96,
'PyArray_GetPriority': 97,
'PyArray_IterNew': 98,
'PyArray_MultiIterNew': 99,
'PyArray_PyIntAsInt': 100,
'PyArray_PyIntAsIntp': 101,
'PyArray_Broadcast': 102,
'PyArray_FillObjectArray': 103,
'PyArray_FillWithScalar': 104,
'PyArray_CheckStrides': 105,
'PyArray_DescrNewByteorder': 106,
'PyArray_IterAllButAxis': 107,
'PyArray_CheckFromAny': 108,
'PyArray_FromArray': 109,
'PyArray_FromInterface': 110,
'PyArray_FromStructInterface': 111,
'PyArray_FromArrayAttr': 112,
'PyArray_ScalarKind': 113,
'PyArray_CanCoerceScalar': 114,
'PyArray_NewFlagsObject': 115,
'PyArray_CanCastScalar': 116,
'PyArray_CompareUCS4': 117,
'PyArray_RemoveSmallest': 118,
'PyArray_ElementStrides': 119,
'PyArray_Item_INCREF': 120,
'PyArray_Item_XDECREF': 121,
'PyArray_FieldNames': 122,
'PyArray_Transpose': 123,
'PyArray_TakeFrom': 124,
'PyArray_PutTo': 125,
'PyArray_PutMask': 126,
'PyArray_Repeat': 127,
'PyArray_Choose': 128,
'PyArray_Sort': 129,
'PyArray_ArgSort': 130,
'PyArray_SearchSorted': 131,
'PyArray_ArgMax': 132,
'PyArray_ArgMin': 133,
'PyArray_Reshape': 134,
'PyArray_Newshape': 135,
'PyArray_Squeeze': 136,
'PyArray_View': 137,
'PyArray_SwapAxes': 138,
'PyArray_Max': 139,
'PyArray_Min': 140,
'PyArray_Ptp': 141,
'PyArray_Mean': 142,
'PyArray_Trace': 143,
'PyArray_Diagonal': 144,
'PyArray_Clip': 145,
'PyArray_Conjugate': 146,
'PyArray_Nonzero': 147,
'PyArray_Std': 148,
'PyArray_Sum': 149,
'PyArray_CumSum': 150,
'PyArray_Prod': 151,
'PyArray_CumProd': 152,
'PyArray_All': 153,
'PyArray_Any': 154,
'PyArray_Compress': 155,
'PyArray_Flatten': 156,
'PyArray_Ravel': 157,
'PyArray_MultiplyList': 158,
'PyArray_MultiplyIntList': 159,
'PyArray_GetPtr': 160,
'PyArray_CompareLists': 161,
'PyArray_AsCArray': 162,
'PyArray_As1D': 163,
'PyArray_As2D': 164,
'PyArray_Free': 165,
'PyArray_Converter': 166,
'PyArray_IntpFromSequence': 167,
'PyArray_Concatenate': 168,
'PyArray_InnerProduct': 169,
'PyArray_MatrixProduct': 170,
'PyArray_CopyAndTranspose': 171,
'PyArray_Correlate': 172,
'PyArray_TypestrConvert': 173,
'PyArray_DescrConverter': 174,
'PyArray_DescrConverter2': 175,
'PyArray_IntpConverter': 176,
'PyArray_BufferConverter': 177,
'PyArray_AxisConverter': 178,
'PyArray_BoolConverter': 179,
'PyArray_ByteorderConverter': 180,
'PyArray_OrderConverter': 181,
'PyArray_EquivTypes': 182,
'PyArray_Zeros': 183,
'PyArray_Empty': 184,
'PyArray_Where': 185,
'PyArray_Arange': 186,
'PyArray_ArangeObj': 187,
'PyArray_SortkindConverter': 188,
'PyArray_LexSort': 189,
'PyArray_Round': 190,
'PyArray_EquivTypenums': 191,
'PyArray_RegisterDataType': 192,
'PyArray_RegisterCastFunc': 193,
'PyArray_RegisterCanCast': 194,
'PyArray_InitArrFuncs': 195,
'PyArray_IntTupleFromIntp': 196,
'PyArray_TypeNumFromName': 197,
'PyArray_ClipmodeConverter': 198,
'PyArray_OutputConverter': 199,
'PyArray_BroadcastToShape': 200,
'_PyArray_SigintHandler': 201,
'_PyArray_GetSigintBuf': 202,
'PyArray_DescrAlignConverter': 203,
'PyArray_DescrAlignConverter2': 204,
'PyArray_SearchsideConverter': 205,
'PyArray_CheckAxis': 206,
'PyArray_OverflowMultiplyList': 207,
'PyArray_CompareString': 208,
'PyArray_MultiIterFromObjects': 209,
'PyArray_GetEndianness': 210,
'PyArray_GetNDArrayCFeatureVersion': 211,
'PyArray_Correlate2': 212,
'PyArray_NeighborhoodIterNew': 213,
# End 1.5 API
'PyArray_SetDatetimeParseFunction': 219,
'PyArray_DatetimeToDatetimeStruct': 220,
'PyArray_TimedeltaToTimedeltaStruct': 221,
'PyArray_DatetimeStructToDatetime': 222,
'PyArray_TimedeltaStructToTimedelta': 223,
# NDIter API
'NpyIter_New': 224,
'NpyIter_MultiNew': 225,
'NpyIter_AdvancedNew': 226,
'NpyIter_Copy': 227,
'NpyIter_Deallocate': 228,
'NpyIter_HasDelayedBufAlloc': 229,
'NpyIter_HasExternalLoop': 230,
'NpyIter_EnableExternalLoop': 231,
'NpyIter_GetInnerStrideArray': 232,
'NpyIter_GetInnerLoopSizePtr': 233,
'NpyIter_Reset': 234,
'NpyIter_ResetBasePointers': 235,
'NpyIter_ResetToIterIndexRange': 236,
'NpyIter_GetNDim': 237,
'NpyIter_GetNOp': 238,
'NpyIter_GetIterNext': 239,
'NpyIter_GetIterSize': 240,
'NpyIter_GetIterIndexRange': 241,
'NpyIter_GetIterIndex': 242,
'NpyIter_GotoIterIndex': 243,
'NpyIter_HasMultiIndex': 244,
'NpyIter_GetShape': 245,
'NpyIter_GetGetMultiIndex': 246,
'NpyIter_GotoMultiIndex': 247,
'NpyIter_RemoveMultiIndex': 248,
'NpyIter_HasIndex': 249,
'NpyIter_IsBuffered': 250,
'NpyIter_IsGrowInner': 251,
'NpyIter_GetBufferSize': 252,
'NpyIter_GetIndexPtr': 253,
'NpyIter_GotoIndex': 254,
'NpyIter_GetDataPtrArray': 255,
'NpyIter_GetDescrArray': 256,
'NpyIter_GetOperandArray': 257,
'NpyIter_GetIterView': 258,
'NpyIter_GetReadFlags': 259,
'NpyIter_GetWriteFlags': 260,
'NpyIter_DebugPrint': 261,
'NpyIter_IterationNeedsAPI': 262,
'NpyIter_GetInnerFixedStrideArray': 263,
'NpyIter_RemoveAxis': 264,
'NpyIter_GetAxisStrideArray': 265,
'NpyIter_RequiresBuffering': 266,
'NpyIter_GetInitialDataPtrArray': 267,
'NpyIter_CreateCompatibleStrides': 268,
#
'PyArray_CastingConverter': 269,
'PyArray_CountNonzero': 270,
'PyArray_PromoteTypes': 271,
'PyArray_MinScalarType': 272,
'PyArray_ResultType': 273,
'PyArray_CanCastArrayTo': 274,
'PyArray_CanCastTypeTo': 275,
'PyArray_EinsteinSum': 276,
'PyArray_NewLikeArray': 277,
'PyArray_GetArrayParamsFromObject': 278,
'PyArray_ConvertClipmodeSequence': 279,
'PyArray_MatrixProduct2': 280,
# End 1.6 API
'NpyIter_IsFirstVisit': 281,
'PyArray_SetBaseObject': 282,
'PyArray_CreateSortedStridePerm': 283,
'PyArray_RemoveAxesInPlace': 284,
'PyArray_DebugPrint': 285,
'PyArray_FailUnlessWriteable': 286,
'PyArray_SetUpdateIfCopyBase': 287,
'PyDataMem_NEW': 288,
'PyDataMem_FREE': 289,
'PyDataMem_RENEW': 290,
'PyDataMem_SetEventHook': 291,
'PyArray_MapIterSwapAxes': 293,
'PyArray_MapIterArray': 294,
'PyArray_MapIterNext': 295,
# End 1.7 API
'PyArray_Partition': 296,
'PyArray_ArgPartition': 297,
'PyArray_SelectkindConverter': 298,
'PyDataMem_NEW_ZEROED': 299,
# End 1.8 API
}
ufunc_types_api = {
'PyUFunc_Type': 0
}
ufunc_funcs_api = {
'PyUFunc_FromFuncAndData': 1,
'PyUFunc_RegisterLoopForType': 2,
'PyUFunc_GenericFunction': 3,
'PyUFunc_f_f_As_d_d': 4,
'PyUFunc_d_d': 5,
'PyUFunc_f_f': 6,
'PyUFunc_g_g': 7,
'PyUFunc_F_F_As_D_D': 8,
'PyUFunc_F_F': 9,
'PyUFunc_D_D': 10,
'PyUFunc_G_G': 11,
'PyUFunc_O_O': 12,
'PyUFunc_ff_f_As_dd_d': 13,
'PyUFunc_ff_f': 14,
'PyUFunc_dd_d': 15,
'PyUFunc_gg_g': 16,
'PyUFunc_FF_F_As_DD_D': 17,
'PyUFunc_DD_D': 18,
'PyUFunc_FF_F': 19,
'PyUFunc_GG_G': 20,
'PyUFunc_OO_O': 21,
'PyUFunc_O_O_method': 22,
'PyUFunc_OO_O_method': 23,
'PyUFunc_On_Om': 24,
'PyUFunc_GetPyValues': 25,
'PyUFunc_checkfperr': 26,
'PyUFunc_clearfperr': 27,
'PyUFunc_getfperr': 28,
'PyUFunc_handlefperr': 29,
'PyUFunc_ReplaceLoopBySignature': 30,
'PyUFunc_FromFuncAndDataAndSignature': 31,
'PyUFunc_SetUsesArraysAsData': 32,
# End 1.5 API
'PyUFunc_e_e': 33,
'PyUFunc_e_e_As_f_f': 34,
'PyUFunc_e_e_As_d_d': 35,
'PyUFunc_ee_e': 36,
'PyUFunc_ee_e_As_ff_f': 37,
'PyUFunc_ee_e_As_dd_d': 38,
# End 1.6 API
'PyUFunc_DefaultTypeResolver': 39,
'PyUFunc_ValidateCasting': 40,
# End 1.7 API
'PyUFunc_RegisterLoopForDescr': 41,
# End 1.8 API
}
# List of all the dicts which define the C API
# XXX: DO NOT CHANGE THE ORDER OF TUPLES BELOW !
multiarray_api = (
multiarray_global_vars,
multiarray_global_vars_types,
multiarray_scalar_bool_values,
multiarray_types_api,
multiarray_funcs_api,
)
ufunc_api = (
ufunc_funcs_api,
ufunc_types_api
)
full_api = multiarray_api + ufunc_api
| 44.111111 | 78 | 0.476728 | [
"BSD-3-Clause"
] | MatthieuDartiailh/numpy | numpy/core/code_generators/numpy_api.py | 18,262 | Python |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import WebDriverException
import time
from django.test import LiveServerTestCase
MAX_WAIT = 10
class NewVisitorTest(LiveServerTestCase):
def setUp(self):
self.browser = webdriver.Firefox()
def tearDown(self):
self.browser.quit()
def wait_for_row_in_list_table(self, row_text):
start_time = time.time()
while True:
try:
table = self.browser.find_element_by_id('id_list_table')
rows = table.find_elements_by_tag_name('tr')
self.assertIn(row_text, [row.text for row in rows])
return
except (AssertionError, WebDriverException) as e:
if time.time() - start_time > MAX_WAIT:
raise e
time.sleep(0.5)
def test_can_start_a_list_for_one_user(self):
# Edith has heard about a cool new online to-do app. She goes
# to check out its homepage
self.browser.get(self.live_server_url)
# She notices the page title and header mention to-do lists
self.assertIn('To-Do', self.browser.title)
header_text = self.browser.find_element_by_tag_name('h1').text
self.assertIn('To-Do', header_text)
# She is invited to enter a to-do item straight away
input_box = self.browser.find_element_by_id('id_new_item')
self.assertEqual(
input_box.get_attribute('placeholder'),
'Enter a to-do item'
)
# She types "Buy peacock feathers" into a text box (Edith's hobby
# is tying fly-fishing lures)
input_box.send_keys('Buy peacock feathers')
# When she hits enter, the page updates, and now the page lists
# "1: Buy peacock feathers" as an item in a to-do list
input_box.send_keys(Keys.ENTER)
self.wait_for_row_in_list_table('1: Buy peacock feathers')
# There is still a text box inviting her to add another item. She
# enters "Use peacock feathers to make a fly" (Edith is very methodical)
input_box = self.browser.find_element_by_id('id_new_item')
input_box.send_keys('Use peacock feathers to make a fly')
input_box.send_keys(Keys.ENTER)
# The page updates again, and now shows both items on her list
self.wait_for_row_in_list_table('1: Buy peacock feathers')
self.wait_for_row_in_list_table('2: Use peacock feathers to make a fly')
# Satisfied, she goes back to sleep
def test_multiple_users_can_start_lists_at_different_urls(self):
# Edith starts a new to-do list
self.browser.get(self.live_server_url)
input_box = self.browser.find_element_by_id('id_new_item')
input_box.send_keys('Buy peacock feathers')
input_box.send_keys(Keys.ENTER)
self.wait_for_row_in_list_table('1: Buy peacock feathers')
# She notices that her list has a unique URL
edith_list_url = self.browser.current_url
self.assertRegex(edith_list_url, '/lists/.+')
# Now a new user, Francis, comes along to the site.
# We use a new browser session to make sure that no information
# of Edith's is coming through from cookies etc
self.browser.quit()
self.browser = webdriver.Firefox()
# Francis visits the home page. There is no sign of Edith's list
self.browser.get(self.live_server_url)
page_text = self.browser.find_element_by_tag_name('body').text
self.assertNotIn('Buy peacock feathers', page_text)
self.assertNotIn('make a fly', page_text)
# Francis starts a new list by entering a new item. He
# is less interesting than Edith...
input_box = self.browser.find_element_by_id('id_new_item')
input_box.send_keys('Buy milk')
input_box.send_keys(Keys.ENTER)
self.wait_for_row_in_list_table('1: Buy milk')
# Francis gets his own unique URL
francis_list_url = self.browser.current_url
self.assertRegex(francis_list_url, '/lists/.+')
self.assertNotEqual(francis_list_url, edith_list_url)
# Again, there is no trace of Edith's list
page_text = self.browser.find_element_by_tag_name('body').text
self.assertNotIn('Buy peacock feathers', page_text)
self.assertIn('Buy milk', page_text)
# Satisfied, they both go back to sleep
self.fail('Finish the test!')
| 40.428571 | 80 | 0.664973 | [
"MIT"
] | votatdat/obeythetestinggoat | functional_tests/tests.py | 4,528 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 by Murray Altheim. All rights reserved. This file is part
# of the Robot Operating System project, released under the MIT License. Please
# see the LICENSE file included as part of this package.
#
# author: Murray Altheim
# created: 2021-03-06
# modified: 2021-03-09
#
import itertools
from pa1010d import PA1010D, PPS
from colorama import init, Fore, Style
init()
from lib.logger import Level, Logger
class GPS(object):
'''
A wrapper around the PA1010d GPS library. Provides individual
properties for each of the GPS outputs.
'''
def __init__(self, level):
self._log = Logger("gps", level)
self._counter = itertools.count()
self._gps = PA1010D()
# self._gps.set_pps(mode=PPS.ALWAYS)
self._gps.set_pps(mode=PPS.ONLY_2D_3D)
self.clear()
self._log.info('ready.')
# ..........................................................................
def clear(self):
self._timestamp, self._latitude, self._longitude, self._altitude, \
self._sat_count, self._quality, self._speed, self._mf_type, \
self._pdop, self._vdop, self._hdop = (None,)*11
# ..........................................................................
def read(self):
self._count = next(self._counter)
result = self._gps.update()
if result:
_data = self._gps.data
            if _data.get('timestamp') is None:
self.clear()
else:
self._timestamp = _data.get('timestamp')
self._latitude = _data.get('latitude')
self._longitude = _data.get('longitude')
self._altitude = _data.get('altitude')
self._sat_count = _data.get('num_sats')
self._quality = _data.get('gps_qual')
self._speed = _data.get('speed_over_ground')
self._mf_type = _data.get('mode_fix_type')
self._pdop = _data.get('pdop')
self._vdop = _data.get('vdop')
self._hdop = _data.get('hdop')
else:
return None
# ..........................................................................
def display(self):
        if self._timestamp is None:
self._log.info(Fore.CYAN + ' [{:06d}]'.format(self._count) + Fore.YELLOW + ' GPS returned null: no satellites found.')
else:
try:
_color = Fore.BLUE if self._sat_count == 0 else Fore.YELLOW
self._log.info(Fore.CYAN + ' [{:06d}]'.format(self._count) + _color \
+ Fore.GREEN + ' time: {}; {} sat; q{};'.format(self._timestamp, self._sat_count, self._quality) \
+ Fore.WHITE + ' lati-long: {:6.4f}, {:6.4f}; alt: {:5.2f}m; speed: {}m/s;'.format(self._latitude, self._longitude, self._altitude, self._speed) )
# + Fore.BLACK + ' fix type: {} PDOP: {} VDOP: {} HDOP: {}'.format(self._mf_type, self._pdop, self._vdop, self._hdop) )
except Exception:
pass
# ..........................................................................
@property
def timestamp(self):
return self._timestamp
# ..........................................................................
@property
def latitude(self):
return self._latitude
# ..........................................................................
@property
def longitude(self):
return self._longitude
# ..........................................................................
@property
def altitude(self):
return self._altitude
# ..........................................................................
@property
def satellites(self):
return self._sat_count
# ..........................................................................
@property
def quality(self):
return self._quality
# ..........................................................................
@property
def speed(self):
return self._speed
# ..........................................................................
@property
def mode_fix_type(self):
return self._mf_type
# ..........................................................................
@property
def pdop(self):
return self._pdop
# ..........................................................................
@property
def vdop(self):
return self._vdop
# ..........................................................................
@property
def hdop(self):
return self._hdop
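# Usage sketch (illustrative, not part of the library): poll the GPS once per
# second and print each fix. This assumes Level.INFO is a valid lib.logger
# level and that a PA1010d module is actually attached over I2C.
if __name__ == '__main__':
    import time
    _gps = GPS(Level.INFO)
    try:
        while True:
            _gps.read()
            _gps.display()
            time.sleep(1.0)
    except KeyboardInterrupt:
        pass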
#EOF
| 35.768657 | 170 | 0.440851 | [
"MIT"
] | fanmuzhi/ros | lib/gps.py | 4,793 | Python |
import itertools
import sys
import time
import signal
from .fancycli import fancywait
from .fancycli.platform import isatty
def skipcallback(handler):
raise StopIteration
class ShellNextFrontend:
def __init__(self, use_status_line, show_toggle):
self.show_toggle = show_toggle
self.use_status_line = use_status_line
if use_status_line:
io = sys.stdout.buffer
if hasattr(io, 'raw'):
io = io.raw
line = fancywait.StatusLine(io)
self.statusline = line
def attach(self, helper):
self.helper = helper
def alert(self, title, text, level='info', details=None):
pass
def notify(self, name, value):
pass
def delay(self, secs, allow_skip):
if not self.use_status_line:
time.sleep(secs)
return
if self.show_toggle:
togglelabel = lambda: '<r>切换自动补充理智(%s)' % ('ON' if self.helper.use_refill else 'OFF')
def togglecallback(handler):
self.helper.use_refill = not self.helper.use_refill
handler.label = togglelabel()
togglehandler = lambda: fancywait.KeyHandler(togglelabel(), b'r', togglecallback)
else:
togglehandler = lambda: fancywait.KeyHandler(None, None, None)
skiphandler = fancywait.KeyHandler('<ENTER>跳过', b'\r', skipcallback)
skipdummy = fancywait.KeyHandler(' ', b'', lambda x: None)
fancywait.fancy_delay(secs, self.statusline, [skiphandler if allow_skip else skipdummy, togglehandler()])
def _create_helper(use_status_line=True, show_toggle=False):
from Arknights.helper import ArknightsHelper
_ensure_device()
frontend = ShellNextFrontend(use_status_line, show_toggle)
helper = ArknightsHelper(device_connector=device, frontend=frontend)
if use_status_line:
context = frontend.statusline
else:
from contextlib import nullcontext
context = nullcontext()
return helper, context
def _parse_opt(argv):
ops = []
if len(argv) >= 2 and argv[1][:1] in ('+', '-'):
opts = argv.pop(1)
enable_refill = None
for i, c in enumerate(opts):
if c == '+':
enable_refill = True
elif c == '-':
enable_refill = False
elif c == 'r' and enable_refill is not None:
def op(helper):
helper.use_refill = enable_refill
helper.refill_with_item = enable_refill
ops.append(op)
elif c == 'R' and enable_refill is not None:
def op(helper):
helper.refill_with_originium = enable_refill
ops.append(op)
elif c in '0123456789' and enable_refill:
num = int(opts[i:])
def op(helper):
helper.max_refill_count = num
ops.append(op)
break
else:
raise ValueError('unrecognized token: %r in option %r' % (c, opts))
return ops
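# Worked example (illustrative): the option string "+rR5" produces three ops:
# enable refill with items (+r), enable refill with originium (+R), and cap
# automatic refills at 5. Each returned op mutates the helper in place.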
class AlarmContext:
def __init__(self, duration=60):
self.duration = duration
def __enter__(self):
self.t0 = time.monotonic()
def __exit__(self, exc_type, exc_val, exc_tb):
t1 = time.monotonic()
if t1 - self.t0 >= self.duration:
self.alarm()
def alarm(self):
pass
class BellAlarmContext(AlarmContext):
def alarm(self):
print('\a', end='')
def _alarm_context_factory():
if isatty(sys.stdout):
return BellAlarmContext()
return AlarmContext()
device = None
def connect(argv):
"""
connect [connector type] [connector args ...]
    Connect to a device.
    Supported connector types:
connect adb [serial or tcpip endpoint]
"""
connector_type = 'adb'
if len(argv) > 1:
connector_type = argv[1]
connector_args = argv[2:]
else:
connector_args = []
if connector_type == 'adb':
_connect_adb(connector_args)
else:
print('unknown connector type:', connector_type)
def _connect_adb(args):
from connector.ADBConnector import ADBConnector, ensure_adb_alive
ensure_adb_alive()
global device
if len(args) == 0:
try:
device = ADBConnector.auto_connect()
except IndexError:
print("检测到多台设备")
devices = ADBConnector.available_devices()
for i, (serial, status) in enumerate(devices):
print("%2d. %s\t[%s]" % (i, serial, status))
num = 0
while True:
try:
num = int(input("请输入序号选择设备: "))
if not 0 <= num < len(devices):
raise ValueError()
break
except ValueError:
print("输入不合法,请重新输入")
device_name = devices[num][0]
device = ADBConnector(device_name)
else:
serial = args[0]
try:
device = ADBConnector(serial)
except RuntimeError as e:
if e.args and isinstance(e.args[0], bytes) and b'not found' in e.args[0]:
if ':' in serial and serial.split(':')[-1].isdigit():
print('adb connect', serial)
ADBConnector.paranoid_connect(serial)
device = ADBConnector(serial)
return
raise
def _ensure_device():
if device is None:
connect(['connect'])
device.ensure_alive()
def quick(argv):
"""
quick [+-rR[N]] [n]
    Repeat the stage currently shown on screen a given number of times, or until sanity runs out.
    +r/-r whether to refill sanity automatically, at most N times
    +R/-R whether to refill sanity with originium (requires +r as well)
"""
ops = _parse_opt(argv)
if len(argv) == 2:
count = int(argv[1])
else:
count = 114514
helper, context = _create_helper(show_toggle=True)
for op in ops:
op(helper)
with context:
helper.module_battle_slim(
c_id=None,
set_count=count,
)
return 0
def auto(argv):
"""
auto [+-rR[N]] stage1 count1 [stage2 count2] ...
    Challenge the listed stages in order, each the given number of times, until sanity runs out.
"""
ops = _parse_opt(argv)
arglist = argv[1:]
if len(arglist) % 2 != 0:
print('usage: auto [+-rR] stage1 count1 [stage2 count2] ...')
return 1
it = iter(arglist)
tasks = [(stage.upper(), int(counts)) for stage, counts in zip(it, it)]
helper, context = _create_helper(show_toggle=True)
for op in ops:
op(helper)
with context:
helper.main_handler(
clear_tasks=False,
task_list=tasks,
auto_close=False
)
return 0
def collect(argv):
"""
collect
    Collect daily and weekly mission rewards.
"""
helper, context = _create_helper()
with context:
helper.clear_task()
return 0
def recruit(argv):
"""
recruit [tags ...]
    Recognize/calculate recruitment tags; if no tags are given, recognize them from a screenshot.
"""
from . import recruit_calc
if 2 <= len(argv) <= 6:
tags = argv[1:]
result = recruit_calc.calculate(tags)
elif len(argv) == 1:
helper, context = _create_helper(use_status_line=False)
with context:
result = helper.recruit()
else:
print('要素过多')
return 1
colors = ['\033[36m', '\033[90m', '\033[37m', '\033[32m', '\033[93m', '\033[91m']
reset = '\033[39m'
for tags, operators, rank in result:
taglist = ','.join(tags)
if rank >= 1:
taglist = '\033[96m' + taglist + '\033[39m'
print("%s: %s" % (taglist, ' '.join(colors[op[1]] + op[0] + reset for op in operators)))
def interactive(argv):
"""
interactive
    Enter interactive mode to reduce the amount of typing.
"""
import shlex
import traceback
helpcmds(interactive_cmds)
errorlevel = None
try:
import readline
except ImportError:
pass
while True:
try:
if device is None:
prompt = "akhelper> "
else:
prompt = "akhelper %s> " % str(device)
cmdline = input(prompt)
argv = shlex.split(cmdline)
if len(argv) == 0 or argv[0] == '?' or argv[0] == 'help':
print(' '.join(x.__name__ for x in interactive_cmds))
continue
elif argv[0] == 'exit':
break
cmd = match_cmd(argv[0], interactive_cmds)
if cmd is not None:
with _alarm_context_factory():
errorlevel = cmd(argv)
except EOFError:
print('') # print newline on EOF
break
except (Exception, KeyboardInterrupt) as e:
errorlevel = e
traceback.print_exc()
continue
return errorlevel
argv0 = 'placeholder'
def helpcmds(cmds):
print("commands (prefix abbreviation accepted):")
for cmd in cmds:
if cmd.__doc__:
print(" " + str(cmd.__doc__.strip()))
else:
print(" " + cmd.__name__)
def help(argv):
"""
help
    Print this help message.
"""
print("usage: %s command [command args]" % argv0)
helpcmds(global_cmds)
def exit(argv):
sys.exit()
global_cmds = [quick, auto, collect, recruit, interactive, help]
interactive_cmds = [connect, quick, auto, collect, recruit, exit]
def match_cmd(first, avail_cmds):
targetcmd = [x for x in avail_cmds if x.__name__.startswith(first)]
if len(targetcmd) == 1:
return targetcmd[0]
elif len(targetcmd) == 0:
print("unrecognized command: " + first)
return None
else:
print("ambiguous command: " + first)
print("matched commands: " + ','.join(x.__name__ for x in targetcmd))
return None
def main(argv):
global argv0
argv0 = argv[0]
if len(argv) < 2:
interactive(argv[1:])
return 1
targetcmd = match_cmd(argv[1], global_cmds)
if targetcmd is not None:
return targetcmd(argv[1:])
else:
help(argv)
return 1
if __name__ == '__main__':
import sys
sys.exit(main(sys.argv))
__all__ = ['main']
| 27.684932 | 113 | 0.557051 | [
"MIT"
] | TeemoKill/ArknightsAutoHelper | Arknights/shell_next.py | 10,465 | Python |
from dataloader import AmazonProductDataloader
from inverted_index import InvertedIndex
from utils import preprocess_text
import numpy as np
class BM25SearchRelevance:
def __init__(self, inverted_index, b=0.65, k1=1.6):
self.inverted_index = inverted_index
self.b = b
self.k1 = k1
        self.total_documents = inverted_index.dataloader.dataset.shape[0]
def score_query(self, query, k=3):
scores = {}
preprocessed_query = preprocess_text(query, tokens_only=True)
for query_term in preprocessed_query:
if query_term in self.inverted_index.term_dictionary:
term_frequencies = self.inverted_index.term_dictionary[query_term]
for term_frequency in term_frequencies:
if term_frequency["document"] not in scores:
scores[term_frequency["document"]] = 0
                    scores[term_frequency["document"]] += self.bm25_score(term_frequency["frequency"], len(term_frequencies), term_frequency["document_length"])
        # rank documents by descending BM25 score before taking the top k
        scores = dict(sorted(scores.items(), key=lambda x: x[1], reverse=True))
        if k > len(scores):
            k = len(scores)
        return list(scores.keys())[:k]  # returns top k documents
def bm25_score(self, term_frequency, document_frequency, document_length):
        # term-frequency saturation: tf / (k1 * ((1 - b) + b * dl / avgdl) + tf)
        tf = term_frequency / (self.k1 * ((1 - self.b) + (self.b * (document_length / self.inverted_index.average_document_length))) + term_frequency)
idf = np.log((self.total_documents - document_frequency + 0.5)/ (document_frequency + 0.5))
return tf * idf
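# Usage sketch (illustrative): wiring the pieces together. The constructor
# calls for AmazonProductDataloader and InvertedIndex below are assumptions
# made for the example only; adapt them to the actual class signatures.
if __name__ == "__main__":
    dataloader = AmazonProductDataloader()   # hypothetical no-arg constructor
    index = InvertedIndex(dataloader)        # hypothetical constructor taking the dataloader
    bm25 = BM25SearchRelevance(index, b=0.65, k1=1.6)
    print(bm25.score_query("wireless headphones", k=3))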
| 46.971429 | 158 | 0.669708 | [
"MIT"
] | shahrukhx01/advanced-information-retrieval-tu-wien | lecture1/code/bm25_search_relevance.py | 1,644 | Python |
"""
Pre-defined query strategy for noisy oracles.
In reality, the labels given by humans are not always correct. On the one hand,
there is some inevitable noise that comes from the instrumentation of the
experimental setting. On the other hand, people can become distracted or fatigued
over time, introducing variability in the quality of their annotations.
ALiPy implements several strategies for noisy-oracle settings.
Some of them mainly evaluate the quality or expertise of each oracle,
and the rest try to obtain the accurate label for each instance
whose labels are provided by several noisy oracles.
There are 2 categories of methods.
1. Query from a single selected oracle.
1.1 Always query from the best oracle
1.2 Query from the most appropriate oracle
according to the selected instance and label.
2. Query from multiple noisy oracles. Labels are obtained from multiple noisy oracles.
And the algorithm tries to obtain the accurate label for each instance.
Implemented methods:
1: CEAL (IJCAI'17)
2: IEthresh (KDD'09 Donmez)
Baselines:
Majority vote
Query from all oracles and majority vote
Randomly select an oracle
"""
# Authors: Ying-Peng Tang
# License: BSD 3 clause
from __future__ import division
import collections
from abc import ABCMeta, abstractmethod
import copy
import numpy as np
import scipy.stats
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import NearestNeighbors
from .base import BaseNoisyOracleQuery
from .query_labels import QueryInstanceUncertainty
from .query_labels import _get_proba_pred
from ..oracle import Oracles, Oracle
def majority_vote(labels, weight=None):
"""Perform majority vote to determine the true label from
multiple noisy oracles.
Parameters
----------
labels: list
A list with length=k, which contains the labels provided by
k noisy oracles.
weight: list, optional (default=None)
The weights of each oracle. It should have the same length with
labels.
Returns
-------
vote_count: int
The number of votes.
vote_result: object
The label of the selected_instance, produced by majority voting
of the selected oracles.
"""
oracle_weight = np.ones(len(labels)) if weight is None else weight
assert len(labels) == len(oracle_weight)
vote_result = collections.Counter(labels)
most_votes = vote_result.most_common(n=1)
return most_votes[0][1], most_votes[0][0]
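# Example (illustrative): three noisy oracles vote 1, 1, -1 for one instance,
# so the majority label is 1 and it receives 2 votes.
# >>> majority_vote([1, 1, -1])
# (2, 1)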
def get_query_results(selected_instance, oracles, names=None):
"""Get the query results from oracles of the selected instance.
Parameters
----------
selected_instance: int
The indexes of selected samples. Should be a member of unlabeled set.
oracles: {list, alipy.oracle.Oracles}
An alipy.oracle.Oracle object that contains all the
available oracles or a list of oracles.
Each oracle should be a alipy.oracle.Oracle object.
names: list, optional (default=None)
A list of str which contains the names of oracles to query from.
If not provided, it will query from all oracles.
Each name should in oracles.names().
Returns
-------
query_labels: list
The queried labels.
query_costs: list
The total cost of query.
"""
costs = []
if isinstance(oracles, list):
oracle_type = 'list'
for oracle in oracles:
assert isinstance(oracle, Oracle)
elif isinstance(oracles, Oracles):
oracle_type = 'oracles'
else:
raise TypeError("The type of parameter oracles must be a list or alipy.oracle.Oracles object.")
labeling_results = []
if oracle_type == 'list':
        for i in range(len(oracles)):
lab, co = oracles[i].query_by_index(selected_instance)
labeling_results.append(lab[0])
costs.append(np.sum(co))
else:
results = oracles.query_from_s(selected_instance, oracles_name=names)
labeling_results = [res[0][0] for res in results]
costs = [np.sum(res[1]) for res in results]
return labeling_results, costs
def get_majority_vote(selected_instance, oracles, names=None):
"""Get the majority vote results of the selected instance.
Parameters
----------
selected_instance: int
The indexes of selected samples. Should be a member of unlabeled set.
oracles: {list, alipy.oracle.Oracles}
An alipy.oracle.Oracle object that contains all the
available oracles or a list of oracles.
Each oracle should be a alipy.oracle.Oracle object.
names: list, optional (default=None)
A list of str which contains the names of oracles to query from.
If not provided, it will query from all oracles.
Each name should in oracles.names().
Returns
-------
vote_count: int
The number of votes.
vote_result: object
The label of the selected_instance, produced by majority voting
of the selected oracles.
query_costs: int
The total cost of query.
"""
labeling_results, cost = get_query_results(selected_instance, oracles, names)
majority_vote_result = majority_vote(labeling_results)
return majority_vote_result[0], majority_vote_result[1], np.sum(cost)
class QueryNoisyOraclesCEAL(BaseNoisyOracleQuery):
"""Cost-Effective Active Learning from Diverse Labelers (CEAL) method assumes
that different oracles have different expertise. Even the very noisy oracle
may perform well on some kind of examples. The cost of a labeler is proportional
to its overall labeling quality and it is thus necessary to query from the right oracle
according to the selected instance.
This method will select an instance-labeler pair (x, a), and queries the label of x
from a, where the selection of both the instance and labeler is based on a
evaluation function Q(x, a).
The selection of instance is depend on its uncertainty. The selection of oracle is
depend on the oracle's performance on the nearest neighbors of selected instance.
The cost of each oracle is proportional to its overall labeling quality.
Parameters
----------
X: 2D array, optional (default=None)
Feature matrix of the whole dataset. It is a reference which will not use additional memory.
y: array-like, optional (default=None)
Label matrix of the whole dataset. It is a reference which will not use additional memory.
oracles: {list, alipy.oracle.Oracles}
An alipy.oracle.Oracle object that contains all the
available oracles or a list of oracles.
Each oracle should be a alipy.oracle.Oracle object.
initial_labeled_indexes: {list, np.ndarray, IndexCollection}
The indexes of initially labeled samples. Used for initializing the scores of each oracle.
References
----------
[1] Sheng-Jun Huang, Jia-Lve Chen, Xin Mu, Zhi-Hua Zhou. 2017.
Cost-Effective Active Learning from Diverse Labelers. In The
Proceedings of the 26th International Joint Conference
on Artificial Intelligence (IJCAI-17), 1879-1885.
"""
def __init__(self, X, y, oracles, initial_labeled_indexes):
super(QueryNoisyOraclesCEAL, self).__init__(X, y, oracles=oracles)
# ytype = type_of_target(self.y)
# if 'multilabel' in ytype:
# warnings.warn("This query strategy does not support multi-label.",
# category=FunctionWarning)
assert (isinstance(initial_labeled_indexes, collections.Iterable))
self._ini_ind = np.asarray(initial_labeled_indexes)
# construct a nearest neighbor object implemented by scikit-learn
self._nntree = NearestNeighbors(metric='euclidean')
self._nntree.fit(self.X[self._ini_ind])
def select(self, label_index, unlabel_index, eval_cost=False, model=None, **kwargs):
"""Query from oracles. Return the index of selected instance and oracle.
Parameters
----------
label_index: {list, np.ndarray, IndexCollection}
The indexes of labeled samples.
unlabel_index: {list, np.ndarray, IndexCollection}
The indexes of unlabeled samples.
eval_cost: bool, optional (default=False)
To evaluate the cost of oracles or use the cost provided by oracles.
model: object, optional (default=None)
Current classification model, should have the 'predict_proba' method for probabilistic output.
If not provided, LogisticRegression with default parameters implemented by sklearn will be used.
n_neighbors: int, optional (default=10)
How many neighbors of the selected instance will be used
to evaluate the oracles.
Returns
-------
selected_instance: int
The index of selected instance.
selected_oracle: int or str
The index of selected oracle.
If a list is given, the index of oracle will be returned.
If a Oracles object is given, the oracle name will be returned.
"""
if model is None:
model = LogisticRegression(solver='liblinear')
model.fit(self.X[label_index], self.y[label_index])
pred_unlab, _ = _get_proba_pred(self.X[unlabel_index], model)
n_neighbors = min(kwargs.pop('n_neighbors', 10), len(self._ini_ind) - 1)
return self.select_by_prediction_mat(label_index, unlabel_index, pred_unlab,
n_neighbors=n_neighbors, eval_cost=eval_cost)
def select_by_prediction_mat(self, label_index, unlabel_index, predict, **kwargs):
"""Query from oracles. Return the index of selected instance and oracle.
Parameters
----------
label_index: {list, np.ndarray, IndexCollection}
The indexes of labeled samples.
unlabel_index: {list, np.ndarray, IndexCollection}
The indexes of unlabeled samples.
predict: : 2d array, shape [n_samples, n_classes]
The probabilistic prediction matrix for the unlabeled set.
n_neighbors: int, optional (default=10)
How many neighbors of the selected instance will be used
to evaluate the oracles.
eval_cost: bool, optional (default=False)
To evaluate the cost of oracles or use the cost provided by oracles.
Returns
-------
selected_instance: int
The index of selected instance.
selected_oracle: int or str
The index of selected oracle.
If a list is given, the index of oracle will be returned.
If a Oracles object is given, the oracle name will be returned.
"""
n_neighbors = min(kwargs.pop('n_neighbors', 10), len(self._ini_ind)-1)
        eval_cost = kwargs.pop('eval_cost', False)
Q_table, oracle_ind_name_dict = self._calc_Q_table(label_index, unlabel_index, self._oracles, predict,
n_neighbors=n_neighbors, eval_cost=eval_cost)
# get the instance-oracle pair
selected_pair = np.unravel_index(np.argmax(Q_table, axis=None), Q_table.shape)
sel_ora = oracle_ind_name_dict[selected_pair[0]]
if not isinstance(sel_ora, list):
sel_ora = [sel_ora]
return [unlabel_index[selected_pair[1]]], sel_ora
def _calc_Q_table(self, label_index, unlabel_index, oracles, pred_unlab, n_neighbors=10, eval_cost=False):
"""Query from oracles. Return the Q table and the oracle name/index of each row of Q_table.
Parameters
----------
label_index: {list, np.ndarray, IndexCollection}
The indexes of labeled samples.
unlabel_index: {list, np.ndarray, IndexCollection}
The indexes of unlabeled samples.
oracles: {list, alipy.oracle.Oracles}
An alipy.oracle.Oracle object that contains all the
available oracles or a list of oracles.
Each oracle should be a alipy.oracle.Oracle object.
predict: : 2d array, shape [n_samples, n_classes]
The probabilistic prediction matrix for the unlabeled set.
n_neighbors: int, optional (default=10)
How many neighbors of the selected instance will be used
to evaluate the oracles.
eval_cost: bool, optional (default=False)
To evaluate the cost of oracles or use the cost provided by oracles.
Returns
-------
Q_table: 2D array
The Q table.
oracle_ind_name_dict: dict
The oracle name/index of each row of Q_table.
"""
# Check parameter and initialize variables
if self.X is None or self.y is None:
raise Exception('Data matrix is not provided, use select_by_prediction_mat() instead.')
assert (isinstance(unlabel_index, collections.Iterable))
assert (isinstance(label_index, collections.Iterable))
unlabel_index = np.asarray(unlabel_index)
label_index = np.asarray(label_index)
num_of_neighbors = n_neighbors
if len(unlabel_index) <= 1:
return unlabel_index
Q_table = np.zeros((len(oracles), len(unlabel_index))) # row:oracle, col:ins
spv = np.shape(pred_unlab)
# calc least_confident
rx = np.partition(pred_unlab, spv[1] - 1, axis=1)
rx = 1 - rx[:, spv[1] - 1]
for unlab_ind, unlab_ins_ind in enumerate(unlabel_index):
# evaluate oracles for each instance
nn_dist, nn_of_selected_ins = self._nntree.kneighbors(X=self.X[unlab_ins_ind].reshape(1, -1),
n_neighbors=num_of_neighbors,
return_distance=True)
nn_dist = nn_dist[0]
nn_of_selected_ins = nn_of_selected_ins[0]
nn_of_selected_ins = self._ini_ind[nn_of_selected_ins] # map to the original population
oracles_score = []
for ora_ind, ora_name in enumerate(self._oracles_iterset):
# calc q_i(x), expertise of this instance
oracle = oracles[ora_name]
labels, cost = oracle.query_by_index(nn_of_selected_ins)
oracles_score.append(sum([nn_dist[i] * (labels[i] == self.y[nn_of_selected_ins[i]]) for i in
range(num_of_neighbors)]) / num_of_neighbors)
# calc c_i, cost of each labeler
labels, cost = oracle.query_by_index(label_index)
if eval_cost:
oracles_cost = sum([labels[i] == self.y[label_index[i]] for i in range(len(label_index))]) / len(label_index)
else:
oracles_cost = cost[0]
Q_table[ora_ind, unlab_ind] = oracles_score[ora_ind] * rx[unlab_ind] / max(oracles_cost, 0.0001)
return Q_table, self._oracle_ind_name_dict
class QueryNoisyOraclesSelectInstanceUncertainty(BaseNoisyOracleQuery, metaclass=ABCMeta):
"""This class implement select and select_by_prediction_mat by uncertainty."""
def __init__(self, X=None, y=None, oracles=None):
super(QueryNoisyOraclesSelectInstanceUncertainty, self).__init__(X=X, y=y, oracles=oracles)
def select(self, label_index, unlabel_index, model=None, **kwargs):
"""Select an instance and a batch of oracles to label it.
The instance is selected by uncertainty, the oracles is
selected by the difference between their
labeling results and the majority vote results.
Parameters
----------
label_index: {list, np.ndarray, IndexCollection}
The indexes of labeled samples.
unlabel_index: {list, np.ndarray, IndexCollection}
The indexes of unlabeled samples.
Returns
-------
selected_instance: int
The index of selected instance. Selected by uncertainty.
selected_oracles: list
The selected oracles for querying.
"""
if model is None:
model = LogisticRegression(solver='liblinear')
model.fit(self.X[label_index], self.y[label_index])
pred_unlab, _ = _get_proba_pred(self.X[unlabel_index], model)
return self.select_by_prediction_mat(label_index, unlabel_index, pred_unlab)
def select_by_prediction_mat(self, label_index, unlabel_index, predict):
"""Query from oracles. Return the index of selected instance and oracle.
Parameters
----------
label_index: {list, np.ndarray, IndexCollection}
The indexes of labeled samples.
unlabel_index: {list, np.ndarray, IndexCollection}
The indexes of unlabeled samples.
predict: : 2d array, shape [n_samples, n_classes]
The probabilistic prediction matrix for the unlabeled set.
Returns
-------
selected_instance: int
The index of selected instance. Selected by uncertainty.
selected_oracles: list
The selected oracles for querying.
"""
# Check parameter and initialize variables
assert (isinstance(unlabel_index, collections.Iterable))
assert (isinstance(label_index, collections.Iterable))
unlabel_index = np.asarray(unlabel_index)
label_index = np.asarray(label_index)
if len(unlabel_index) <= 1:
return unlabel_index
# select instance and oracle
unc = QueryInstanceUncertainty(measure='least_confident')
selected_instance = unc.select_by_prediction_mat(unlabel_index=unlabel_index, predict=predict, batch_size=1)[0]
return [selected_instance], self.select_by_given_instance(selected_instance)
@abstractmethod
def select_by_given_instance(self, selected_instance):
pass
class QueryNoisyOraclesIEthresh(QueryNoisyOraclesSelectInstanceUncertainty):
"""IEthresh will select a batch of oracles to label the selected instance.
It will score for each oracle according to the difference between their
labeling results and the majority vote results.
At each iteration, a batch of oracles whose scores are larger than a threshold will be selected.
Oracle with a higher score is more likely to be selected.
Parameters
----------
X: 2D array, optional (default=None)
Feature matrix of the whole dataset. It is a reference which will not use additional memory.
y: array-like, optional (default=None)
Label matrix of the whole dataset. It is a reference which will not use additional memory.
oracles: {list, alipy.oracle.Oracles}
An alipy.oracle.Oracle object that contains all the
available oracles or a list of oracles.
Each oracle should be a alipy.oracle.Oracle object.
initial_labeled_indexes: {list, np.ndarray, IndexCollection}
The indexes of initially labeled samples. Used for initializing the scores of each oracle.
epsilon: float, optional (default=0.1)
The value to determine how many oracles will be selected.
S_t = {a|UI(a) >= epsilon * max UI(a)}
References
----------
[1] Donmez P , Carbonell J G , Schneider J . Efficiently learning the accuracy of labeling
sources for selective sampling.[C] ACM SIGKDD International Conference on
Knowledge Discovery & Data Mining. ACM, 2009.
"""
def __init__(self, X, y, oracles, initial_labeled_indexes, **kwargs):
super(QueryNoisyOraclesIEthresh, self).__init__(X, y, oracles=oracles)
self._ini_ind = np.asarray(initial_labeled_indexes)
# record the labeling history of each oracle
self._oracles_history = dict()
for i in range(len(self._oracles_iterset)):
self._oracles_history[i] = dict()
# record the results of majority vote
self._majority_vote_results = dict()
        # calc initial UI(a) for each oracle a
self._UI = np.ones(len(self._oracles_iterset))
self.epsilon = kwargs.pop('epsilon', 0.8)
def _calc_uia(self, oracle_history, majority_vote_result, alpha=0.05):
"""Calculate the UI(a) by providing the labeling history and the majority vote results.
Parameters
----------
oracle_history: dict
The labeling history of an oracle. The key is the index of instance, the value is the
label given by the oracle.
majority_vote_result: dict
The results of majority vote of instances. The key is the index of instance,
the value is the label given by the oracle.
alpha: float, optional (default=0.05)
Used for calculating the critical value for the Student’s t-distribution with n−1
degrees of freedom at the alpha/2 confidence level.
Returns
-------
uia: float
The UI(a) value.
"""
n = len(self._oracles_iterset)
t_crit_val = scipy.stats.t.isf([alpha / 2], n - 1)[0]
reward_arr = []
for ind in oracle_history.keys():
if oracle_history[ind] == majority_vote_result[ind]:
reward_arr.append(1)
else:
reward_arr.append(0)
mean_a = np.mean(reward_arr)
std_a = np.std(reward_arr)
uia = mean_a + t_crit_val * std_a / np.sqrt(n)
return uia
def select_by_given_instance(self, selected_instance):
"""Select oracle to query by providing the index of selected instance.
Parameters
----------
selected_instance: int
The indexes of selected samples. Should be a member of unlabeled set.
Returns
-------
selected_oracles: list
The selected oracles for querying.
"""
selected_oracles = np.nonzero(self._UI >= self.epsilon * np.max(self._UI))
selected_oracles = selected_oracles[0]
# update UI(a) for each selected oracle
labeling_results = []
for i in selected_oracles:
lab, _ = self._oracles[self._oracle_ind_name_dict[i]].query_by_index(selected_instance)
labeling_results.append(lab[0])
self._oracles_history[i][selected_instance] = copy.copy(lab[0])
_, majority_vote_result = majority_vote(labeling_results)
reward_arr = np.zeros(len(selected_oracles))
same_ind = np.nonzero(labeling_results == majority_vote_result)[0]
reward_arr[same_ind] = 1
self._majority_vote_results[selected_instance] = majority_vote_result
for i in selected_oracles:
self._UI[i] = self._calc_uia(self._oracles_history[i], self._majority_vote_results)
# return results
return [self._oracle_ind_name_dict[i] for i in selected_oracles]
class QueryNoisyOraclesAll(QueryNoisyOraclesSelectInstanceUncertainty):
"""This strategy will select instance by uncertainty and query from all
oracles and return the majority vote result.
Parameters
----------
X: 2D array, optional (default=None)
Feature matrix of the whole dataset. It is a reference which will not use additional memory.
y: array-like, optional (default=None)
Label matrix of the whole dataset. It is a reference which will not use additional memory.
oracles: {list, alipy.oracle.Oracles}
An alipy.oracle.Oracle object that contains all the
available oracles or a list of oracles.
Each oracle should be a alipy.oracle.Oracle object.
"""
def __init__(self, oracles, X=None, y=None):
super(QueryNoisyOraclesAll, self).__init__(X=X, y=y, oracles=oracles)
def select_by_given_instance(self, selected_instance):
"""Select oracle to query by providing the index of selected instance.
Parameters
----------
selected_instance: int
The indexes of selected samples. Should be a member of unlabeled set.
Returns
-------
oracles_ind: list
The indexes of selected oracles.
"""
return self._oracle_ind_name_dict.values()
class QueryNoisyOraclesRandom(QueryNoisyOraclesSelectInstanceUncertainty):
"""Select a random oracle to query."""
def select_by_given_instance(self, selected_instance):
"""Select oracle to query by providing the index of selected instance.
Parameters
----------
selected_instance: int
The indexes of selected samples. Should be a member of unlabeled set.
Returns
-------
oracles_ind: list
The indexes of selected oracles.
"""
return [self._oracle_ind_name_dict[np.random.randint(0, len(self._oracles), 1)[0]]]
| 40.476651 | 129 | 0.658736 | [
"BSD-3-Clause"
] | Houchaoqun/ALiPy | alipy/query_strategy/noisy_oracles.py | 25,140 | Python |
import komand
from .schema import LabelIssueInput, LabelIssueOutput
# Custom imports below
class LabelIssue(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name='label_issue',
description='Label Issue',
input=LabelIssueInput(),
output=LabelIssueOutput())
def run(self, params={}):
"""Add label to issue"""
issue = self.connection.client.issue(id=params['id'])
if not issue:
raise Exception('Error: No issue found with ID: ' + params['id'])
labels = params['label'].split(',')
for label in labels:
if label not in issue.fields.labels:
issue.fields.labels.append(label)
self.logger.info('Adding labels to issue %s: %s', params['id'], issue.fields.labels)
issue.update(fields={'labels': issue.fields.labels})
return {'success': True}
def test(self):
t = self.connection.test()
if t:
return {'success': True}
| 26.74359 | 92 | 0.589645 | [
"MIT"
] | xhennessy-r7/insightconnect-plugins | jira/komand_jira/actions/label_issue/action.py | 1,043 | Python |
"""OAuth1 module written according to http://oauth.net/core/1.0/#signing_process"""
import base64
import hmac
import requests # requests must be loaded so that urllib receives the parse module
import time
import urllib
from hashlib import sha1
from six import b
from uuid import uuid4
use_parse_quote = not hasattr(urllib, 'quote')
if use_parse_quote:
_quote_func = urllib.parse.quote
else:
_quote_func = urllib.quote
def _quote(obj):
return _quote_func(str(obj), safe='')
def normalize_query_parameters(params):
"""9.1.1. Normalize Request Parameters"""
return '&'.join(map(lambda pair: '='.join([_quote(pair[0]), _quote(pair[1])]), sorted(params.items())))
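# Example (illustrative): parameters are percent-encoded and sorted by key,
# per section 9.1.1 of the OAuth 1.0 spec.
# >>> normalize_query_parameters({'b': 2, 'a': 'x y'})
# 'a=x%20y&b=2'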
def concatenate_request_elements(method, url, query):
"""9.1.3. Concatenate Request Elements"""
return '&'.join(map(_quote, [str(method).upper(), url, query]))
def hmac_sha1(base_string, hmac_key):
"""9.2. HMAC-SHA1"""
hash = hmac.new(b(hmac_key), b(base_string), sha1)
return hash.digest()
def encode(digest):
"""9.2.1. Generating Signature"""
return base64.b64encode(digest).decode('ascii').rstrip('\n')
def add_oauth_entries_to_fields_dict(secret, params, nonce=None, timestamp=None):
""" Adds dict entries to the user's params dict which are required for OAuth1.0 signature generation
:param secret: API secret
:param params: dictionary of values which will be sent in the query
:param nonce: (Optional) random string used in signature creation, uuid4() is used if not provided
:param timestamp: (Optional) integer-format timestamp, time.time() is used if not provided
:return: dict containing params and the OAuth1.0 fields required before executing signature.create
:type secret: str
:type params: dict
:type nonce: str
:type timestamp: int
:Example:
>>> from emailage.signature import add_oauth_entries_to_fields_dict
>>> query_params = dict(user_email='registered.account.user@yourcompany.com',\
query='email.you.are.interested.in@gmail.com'\
)
>>> query_params = add_oauth_entries_to_fields_dict('YOUR_API_SECRET', query_params)
>>> query_params['oauth_consumer_key']
'YOUR_API_SECRET'
>>> query_params['oauth_signature_method']
'HMAC-SHA1'
>>> query_params['oauth_version']
1.0
"""
if nonce is None:
nonce = uuid4()
if timestamp is None:
timestamp = int(time.time())
params['oauth_consumer_key'] = secret
params['oauth_nonce'] = nonce
params['oauth_signature_method'] = 'HMAC-SHA1'
params['oauth_timestamp'] = timestamp
params['oauth_version'] = 1.0
return params
def create(method, url, params, hmac_key):
""" Generates the OAuth1.0 signature used as the value for the query string parameter 'oauth_signature'
:param method: HTTP method that will be used to send the request ( 'GET' | 'POST' ); EmailageClient uses GET
:param url: API domain and endpoint up to the ?
:param params: user-provided query string parameters and the OAuth1.0 parameters
:method add_oauth_entries_to_fields_dict:
:param hmac_key: for Emailage users, this is your consumer token with an '&' (ampersand) appended to the end
:return: str value used for oauth_signature
:type method: str
:type url: str
:type params: dict
:type hmac_key: str
:Example:
>>> from emailage.signature import add_oauth_entries_to_fields_dict, create
>>> your_api_key = 'SOME_KEY'
>>> your_hmac_key = 'SOME_SECRET' + '&'
>>> api_url = 'https://sandbox.emailage.com/emailagevalidator/'
>>> query_params = { 'query': 'user.you.are.validating@gmail.com', 'user_email': 'admin@yourcompany.com' }
>>> query_params = add_oauth_entries_to_fields_dict(your_api_key, query_params)
>>> query_params['oauth_signature'] = create('GET', api_url, query_params, your_hmac_key)
"""
query = normalize_query_parameters(params)
base_string = concatenate_request_elements(method, url, query)
digest = hmac_sha1(base_string, hmac_key)
return encode(digest)
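# Usage sketch (illustrative): once 'oauth_signature' has been added to the
# parameters (see the docstring above), the signed request can be sent as a
# plain GET; response handling is API-specific and omitted here.
# >>> import requests
# >>> response = requests.get(api_url, params=query_params)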
| 35.655462 | 116 | 0.679236 | [
"MIT"
] | bluefish6/Emailage_Python | emailage/signature.py | 4,243 | Python |
from numbers import Number
from typing import Dict, Sequence, Tuple, Union
import torch
from meddlr.transforms.base.spatial import (
AffineTransform,
FlipTransform,
Rot90Transform,
TranslationTransform,
)
from meddlr.transforms.build import TRANSFORM_REGISTRY
from meddlr.transforms.param_kind import ParamKind
from meddlr.transforms.transform import NoOpTransform
from meddlr.transforms.transform_gen import TransformGen
__all__ = ["RandomAffine", "RandomFlip", "RandomRot90", "RandomTranslation"]
SPATIAL_RANGE_OR_VAL = Union[float, Sequence[float], Sequence[Tuple[float, float]]]
@TRANSFORM_REGISTRY.register()
class RandomAffine(TransformGen):
_base_transform = AffineTransform
_param_names = ("angle", "translate", "scale", "shear")
def __init__(
self,
p: Union[float, Dict[str, float]] = 0.0,
angle: Union[float, Tuple[float, float]] = None,
translate: SPATIAL_RANGE_OR_VAL = None,
scale: Union[float, Tuple[float, float]] = None,
shear: SPATIAL_RANGE_OR_VAL = None,
pad_like=None,
):
if isinstance(p, Number):
p = {n: p for n in self._param_names}
else:
assert isinstance(p, dict)
unknown_keys = set(p.keys()) - set(self._param_names)
if len(unknown_keys):
raise ValueError(f"Unknown keys for `p`: {unknown_keys}")
p = p.copy()
p.update({k: 0.0 for k in self._param_names if k not in p})
params = locals()
params = {k: params[k] for k in list(self._param_names)}
self.pad_like = pad_like
super().__init__(
params=params,
p=p,
param_kinds={"translate": ParamKind.MULTI_ARG, "shear": ParamKind.MULTI_ARG},
)
def _get_params(self, shape):
ndim = len(shape)
params = self._get_param_values()
p = params["p"]
param_angle = params["angle"]
param_translate = params["translate"]
param_scale = params["scale"]
param_shear = params["shear"]
if isinstance(param_angle, Number):
param_angle = (-param_angle, param_angle)
if isinstance(param_translate, Number):
param_translate = ((-param_translate, param_translate),)
if isinstance(param_scale, Number):
param_scale = tuple(sorted([1.0 - param_scale, 1.0 + param_scale]))
if isinstance(param_shear, Number):
param_shear = ((-param_shear, param_shear),)
param_translate = self._format_param(param_translate, ParamKind.MULTI_ARG, ndim)
param_shear = self._format_param(param_shear, ParamKind.MULTI_ARG, ndim)
angle, translate, scale, shear = None, None, None, None
if param_angle is not None and self._rand() < p["angle"]:
angle = self._rand_range(*param_angle)
if param_translate is not None and self._rand() < p["translate"]:
translate = [int(self._rand_range(*x) * s) for x, s in zip(param_translate, shape)]
if param_scale is not None and self._rand() < p["scale"]:
scale = self._rand_range(*param_scale)
if param_shear is not None and self._rand() < p["shear"]:
shear = [self._rand_range(*x) for x in param_shear]
return angle, translate, scale, shear
def get_transform(self, image):
# Affine only supports 2D spatial transforms
spatial_shape = image.shape[-2:]
out = self._get_params(spatial_shape)
if all(x is None for x in out):
return NoOpTransform()
angle, translate, scale, shear = out
return AffineTransform(angle=angle, translate=translate, scale=scale, shear=shear)
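# Usage sketch (illustrative): sample a random 2D affine transform for an image
# tensor. With p=1.0 every parameter group is perturbed; the scheduling details
# live in the TransformGen base class, which is assumed to accept the params/p
# layout used below.
# >>> import torch
# >>> gen = RandomAffine(p=1.0, angle=15.0, translate=0.1, scale=0.1, shear=5.0)
# >>> tfm = gen.get_transform(torch.zeros(1, 320, 256))  # an AffineTransform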
@TRANSFORM_REGISTRY.register()
class RandomTranslation(TransformGen):
_base_transform = TranslationTransform
def __init__(
self,
p: Union[float, Dict[str, float]] = 0.0,
translate: SPATIAL_RANGE_OR_VAL = None,
pad_mode=None,
pad_value=0,
ndim=2,
):
params = {"translate": translate}
self.pad_mode = pad_mode
self.pad_value = pad_value
self.ndim = ndim
super().__init__(params=params, p=p, param_kinds={"translate": ParamKind.MULTI_ARG})
def get_transform(self, image):
shape = image.shape[-self.ndim :]
ndim = len(shape)
params = self._get_param_values(use_schedulers=True)
p = params["p"]
param_translate = params["translate"]
translate = self._format_param(param_translate, ParamKind.MULTI_ARG, ndim)
if self._rand() >= p:
return NoOpTransform()
translate = [int(self._rand_range(*x) * s) for x, s in zip(translate, shape)]
return TranslationTransform(translate, pad_mode=self.pad_mode, pad_value=self.pad_value)
@TRANSFORM_REGISTRY.register()
class RandomFlip(TransformGen):
_base_transform = FlipTransform
def __init__(self, dims=None, ndim=None, p: Union[float, Dict[int, float]] = 0.0) -> None:
if dims is None and ndim is None:
raise ValueError("Either `dims` or `ndim` must be specified")
if all(x is not None for x in (dims, ndim)):
raise ValueError("Only one of `dims` or `ndim` can be specified.")
if isinstance(dims, int):
dims = (dims,)
self.dims = dims
self.ndim = ndim
super().__init__(p=p)
def get_transform(self, input):
params = self._get_param_values(use_schedulers=True)
p = params["p"]
if self.dims is not None:
dims = tuple(d for d in self.dims if self._rand() < p)
else:
if isinstance(p, Dict):
dims = tuple(k for k, v in p.items() if self._rand() < v)
else:
dims = tuple(d for d in range(-self.ndim, 0) if self._rand() < p)
return FlipTransform(dims) if dims else NoOpTransform()
@TRANSFORM_REGISTRY.register()
class RandomRot90(TransformGen):
_base_transform = Rot90Transform
def __init__(self, ks=None, p=0.0) -> None:
self.ks = ks if ks is not None else list(range(1, 4))
super().__init__(p=p)
def get_transform(self, input):
params = self._get_param_values(use_schedulers=True)
if self._rand() >= params["p"]:
return NoOpTransform()
k = self.ks[torch.randperm(len(self.ks))[0].item()]
return Rot90Transform(k=k, dims=(-1, -2))
def _duplicate_ndim(param, ndim):
if param is None:
return None
if isinstance(param, Sequence) and isinstance(param[0], Sequence):
return [[x if len(x) > 1 else (-x[0], x[0]) for x in y] for y in param]
if isinstance(param, Sequence):
param = (-param[0], param[0]) if len(param) == 1 else param
else:
param = (-param, param)
return [param] * ndim
| 35.926702 | 96 | 0.626639 | [
"Apache-2.0"
] | ad12/meddlr | meddlr/transforms/gen/spatial.py | 6,862 | Python |
from flask_restful import Resource
from tasks import add
class HelloResource(Resource):
def get(self):
add.delay(3, 5)
return {"msg": "get ok"}
| 13.076923 | 34 | 0.641176 | [
"Apache-2.0"
] | NeverLeft/FLASKTPP | App/apis/HelloApi.py | 170 | Python |
# This file is a part of ninfs.
#
# Copyright (c) 2017-2021 Ian Burgwin
# This file is licensed under The MIT License (MIT).
# You can find the full license text in LICENSE.md in the root of this project.
import logging
import time
from argparse import ArgumentParser, SUPPRESS
from errno import EROFS
from functools import wraps
from io import BufferedIOBase
from os import stat, stat_result
from os.path import realpath as real_realpath
from sys import exit, platform
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from os import PathLike
from typing import BinaryIO, Generator, Tuple, Union
# this is a lazy way to make type checkers stop complaining
BufferedIOBase = BinaryIO
windows = platform in {'win32', 'cygwin'}
macos = platform == 'darwin'
python_cmd = 'py -3' if windows else 'python3'
# TODO: switch to use_ns in all scripts
# noinspection PyBroadException
try:
from fuse import FUSE, FuseOSError, Operations, fuse_get_context
except Exception as e:
exit(f'Failed to import the fuse module:\n'
f'{type(e).__name__}: {e}')
def realpath(path):
try:
return real_realpath(path)
except OSError:
# can happen on Windows when using it on files inside a WinFsp mount
pass
return path
def get_time(path: 'Union[str, PathLike, stat_result]'):
try:
if not isinstance(path, stat_result):
res = stat(path)
else:
res = path
return {'st_ctime': int(res.st_ctime), 'st_mtime': int(res.st_mtime), 'st_atime': int(res.st_atime)}
except OSError:
# sometimes os.stat can't be used with a path, such as Windows physical drives
# so we need to fake the result
now = int(time.time())
return {'st_ctime': now, 'st_mtime': now, 'st_atime': now}
# custom LoggingMixIn modified from the original fusepy, to suppress certain entries.
class LoggingMixIn:
log = logging.getLogger('fuse.log-mixin')
def __call__(self, op, path, *args):
if op != 'access':
self.log.debug('-> %s %s %s', op, path, repr(args))
ret = '[Unhandled Exception]'
try:
ret = getattr(self, op)(path, *args)
return ret
except OSError as e:
ret = str(e)
raise
finally:
if op != 'access':
self.log.debug('<- %s %s', op, repr(ret))
default_argp = ArgumentParser(add_help=False)
default_argp.add_argument('-f', '--fg', help='run in foreground', action='store_true')
default_argp.add_argument('-d', help='debug output (fuse/winfsp log)', action='store_true')
default_argp.add_argument('--do', help=SUPPRESS, default=None) # debugging using python logging
default_argp.add_argument('-o', metavar='OPTIONS', help='mount options')
readonly_argp = ArgumentParser(add_help=False)
readonly_argp.add_argument('-r', '--ro', help='mount read-only', action='store_true')
ctrcrypto_argp = ArgumentParser(add_help=False)
ctrcrypto_argp.add_argument('--boot9', help='path to boot9.bin')
ctrcrypto_argp.add_argument('--dev', help='use dev keys', action='store_const', const=1, default=0)
seeddb_argp = ArgumentParser(add_help=False)
seeddb_argp_group = seeddb_argp.add_mutually_exclusive_group()
seeddb_argp_group.add_argument('--seeddb', help='path to seeddb.bin')
seeddb_argp_group.add_argument('--seed', help='seed as hexstring')
def main_args(name: str, help: str) -> ArgumentParser:
parser = ArgumentParser(add_help=False)
parser.add_argument(name, help=help)
parser.add_argument('mount_point', help='mount point')
return parser
def load_custom_boot9(path: str, dev: bool = False):
"""Load keys from a custom ARM9 bootROM path."""
if path:
from pyctr.crypto import CryptoEngine
# doing this will set up the keys for all future CryptoEngine objects
CryptoEngine(boot9=path, dev=dev)
# aren't type hints great?
def parse_fuse_opts(opts) -> 'Generator[Tuple[str, Union[str, bool]], None, None]':
if not opts:
return
for arg in opts.split(','):
if arg: # leaves out empty ones
separated = arg.split('=', maxsplit=1)
yield separated[0], True if len(separated) == 1 else separated[1]
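# Example (illustrative): bare flags become True, key=value pairs are split.
# >>> dict(parse_fuse_opts('ro,allow_other,uid=501'))
# {'ro': True, 'allow_other': True, 'uid': '501'}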
def remove_first_dir(path: str) -> str:
sep = path.find('/', 1)
if sep == -1:
return '/'
else:
return path[sep:]
def get_first_dir(path: str) -> str:
sep = path.find('/', 1)
if sep == -1:
return path
else:
return path[:sep]
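# Example (illustrative): splitting a virtual path into its first component and
# the remainder.
# >>> get_first_dir('/title/00040000')
# '/title'
# >>> remove_first_dir('/title/00040000')
# '/00040000'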
def ensure_lower_path(method):
@wraps(method)
def wrapper(self, path, *args, **kwargs):
return method(self, path.lower(), *args, **kwargs)
return wrapper
def raise_on_readonly(method):
@wraps(method)
def wrapper(self, *args, **kwargs):
if self.readonly:
raise FuseOSError(EROFS)
return method(self, *args, **kwargs)
return wrapper
def _raise_if_closed(method):
@wraps(method)
def decorator(self, *args, **kwargs):
if self.closed:
raise ValueError('I/O operation on closed file.')
return method(self, *args, **kwargs)
return decorator
class VirtualFileWrapper(BufferedIOBase):
"""Wrapper for a FUSE Operations class for things that need a file-like object."""
_seek = 0
# noinspection PyMissingConstructor
def __init__(self, fuse_op: Operations, path: str, size: int):
self.fuse_op = fuse_op
self.path = path
self.size = size
@_raise_if_closed
def read(self, size: int = -1) -> bytes:
if size == -1:
size = self.size - self._seek
data = self.fuse_op.read(self.path, size, self._seek, 0)
self._seek += len(data)
return data
read1 = read # probably make this act like read1 should, but this for now enables some other things to work
@_raise_if_closed
def seek(self, seek: int, whence: int = 0) -> int:
if whence == 0:
if seek < 0:
raise ValueError(f'negative seek value {seek}')
self._seek = min(seek, self.size)
elif whence == 1:
self._seek = max(self._seek + seek, 0)
elif whence == 2:
self._seek = max(self.size + seek, 0)
return self._seek
@_raise_if_closed
def tell(self) -> int:
return self._seek
@_raise_if_closed
def readable(self) -> bool:
return True
@_raise_if_closed
def writable(self) -> bool:
try:
# types that support writing will have this attribute
return self.fuse_op.readonly
except AttributeError:
return False
@_raise_if_closed
def seekable(self) -> bool:
return True
class SplitFileHandler(BufferedIOBase):
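    """Presents a sequence of on-disk file parts (e.g. file.bin.0, file.bin.1, ...)
    as a single contiguous file-like object, tracking one virtual seek position
    across the part boundaries."""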
_fake_seek = 0
_seek_info = (0, 0)
def __init__(self, names, mode='rb'):
self.mode = mode
self._files = []
curr_offset = 0
self._names = tuple(names)
for idx, f in enumerate(self._names):
s = stat(f)
self._files.append((idx, curr_offset, s.st_size))
curr_offset += s.st_size
self._total_size = curr_offset
def _calc_seek(self, pos):
for idx, info in enumerate(self._files):
if info[1] <= pos < info[1] + info[2]:
self._fake_seek = pos
self._seek_info = (idx, pos - info[1])
break
def seek(self, pos, whence=0):
if whence == 0:
if pos < 0:
raise ValueError('negative seek value')
self._calc_seek(pos)
elif whence == 1:
            if self._fake_seek + pos < 0:
                pos = -self._fake_seek
            self._calc_seek(self._fake_seek + pos)
elif whence == 2:
if self._total_size + pos < 0:
pos = -self._total_size
self._calc_seek(self._total_size + pos)
else:
if isinstance(whence, int):
raise ValueError(f'whence value {whence} unsupported')
else:
raise TypeError(f'an integer is required (got type {type(whence).__name__})')
return self._fake_seek
@_raise_if_closed
def tell(self):
return self._fake_seek
def read(self, count=-1):
if count == -1:
            count = self._total_size - self._fake_seek
if self._fake_seek + count > self._total_size:
count = self._total_size - self._fake_seek
left = count
curr = self._seek_info
full_data = []
while left:
info = self._files[curr[0]]
real_seek = self._fake_seek - info[1]
to_read = min(info[2] - real_seek, left)
with open(self._names[curr[0]], 'rb') as f:
f.seek(real_seek)
full_data.append(f.read(to_read))
self._fake_seek += to_read
try:
curr = self._files[curr[0] + 1]
left -= to_read
except IndexError:
break # EOF
# TODO: make this more efficient
self._calc_seek(self._fake_seek)
return b''.join(full_data)
def write(self, data: bytes):
left = len(data)
total = left
curr = self._seek_info
while left:
info = self._files[curr[0]]
real_seek = self._fake_seek - info[1]
to_write = min(info[2] - real_seek, left)
with open(self._names[curr[0]], 'rb+') as f:
f.seek(real_seek)
f.write(data[total - left:total - left + to_write])
self._fake_seek += to_write
try:
curr = self._files[curr[0] + 1]
left -= to_write
except IndexError:
break # EOF
# TODO: make this more efficient
self._calc_seek(self._fake_seek)
return total - left
@_raise_if_closed
def readable(self) -> bool:
return 'r' in self.mode
def writable(self) -> bool:
return 'w' in self.mode or 'a' in self.mode
@_raise_if_closed
def seekable(self) -> bool:
return True
class RawDeviceHandler(BufferedIOBase):
"""Handler for easier IO access with raw devices by aligning reads and writes to the sector size."""
_seek = 0
def __init__(self, fh: 'BinaryIO', mode: str = 'rb+', sector_size: int = 0x200):
self._fh = fh
self.mode = mode
self._sector_size = sector_size
@_raise_if_closed
def seek(self, seek: int, whence: int = 0) -> int:
if whence == 0:
if seek < 0:
raise ValueError(f'negative seek value {seek}')
self._seek = seek
elif whence == 1:
self._seek = max(self._seek + seek, 0)
elif whence == 2:
# this doesn't work...
raise Exception
return self._seek
@_raise_if_closed
def tell(self) -> int:
return self._seek
@_raise_if_closed
def readable(self) -> bool:
return True
@_raise_if_closed
def writable(self) -> bool:
return True
@_raise_if_closed
def seekable(self) -> bool:
return True
| 30.016086 | 112 | 0.602358 | [
"MIT"
] | Jhynjhiruu/ninfs | ninfs/mount/_common.py | 11,196 | Python |
"""
Copyright 2019 by Adam Lewicki
This file is part of the Game Theory library,
and is released under the "MIT License Agreement". Please see the LICENSE
file that should have been included as part of this package.
"""
import json
# ======================================================================================================================
# game tree object
class GameTree:
# ---------------------------------- OBJECT PROPERTIES -------------------------------------------------------------
# procedure of printing object properties
def __repr__(self):
""" return tree as JSON serialized dictionary """
return self.pretty_print(self.__dict__)
@staticmethod
def pretty_print(dictionary: dict):
""" return pretty printed dictionary as JSON serialized object """
return json.dumps(dictionary, indent=4)
# initialize object
def __init__(self, nodes: dict = None, groups: dict = None, leafs: list = None, players_list: list = None):
"""
GameTree class used to represent game tree:
Attributes
----------
nodes : dict
dictionary of nodes;
groups : dict
dictionary of groups
leafs : list
list of leafs, calculated on demand
players_list: list
list of players names, indicating which game income from list is connected to which player
"""
'''
dictionary of nodes:
Attributes
----------
node : dict
dictionary representing node;
Attributes
----------
value : float
value of node (the prize for reaching the node)
parents : dict
parents of node - can be multiple, represented by dict of ids and connection values
children : dict
children of node - can be multiple, represented by dict of ids and connection values
probability : float
probability of node - 1 means there is no random choice
branch : dict
totals of branch, to avoid tree walking
Attributes
----------
value : float
total value of branch
probability : float
probability of reaching this node in game
'''
# remember to add new attributes to add_node method default values setting
        self._nodes = {} if nodes is None else nodes
# dictionary of knowledge groups
self._groups = {} if groups is None else groups
# dictionary of leafs
self._leafs = [] if leafs is None else leafs
self._players_list = [] if players_list is None else players_list
        # always add root (only when no nodes dictionary was supplied)
        if nodes is None:
            self.add_node({
                'id': 'root',
                'player': '1',
            })
# ---------------------------------- NODES -------------------------------------------------------------------------
def add_node(self, node: dict):
"""
add node method. Runs basic validation before adding.
:param dict node: dictionary of node's data
"""
# check if it is not overriding existing node
if node.get('id') is not None:
if node['id'] in self._nodes:
raise ValueError('tried to override node %s' % node['id'])
else:
raise ValueError('no id for node provided')
# append node to list
id_ = node['id']
del node['id']
# set default values for node
# remember to add new attributes here and in __init__ root node
node['player'] = '0' if node.get('player') is None else node['player']
node['value'] = [0, 0] if node.get('value') is None else node['value']
node['parents'] = {} if node.get('parents') is None else node['parents']
node['children'] = {} if node.get('children') is None else node['children']
node['probability'] = 1 if node.get('probability') is None else node['probability']
node['branch'] = {} if node.get('branch') is None else node['branch']
node['branch']['probability'] = 1 \
if node['branch'].get('probability') is None else node['branch']['probability']
# add player to the list of players if he is not there already
if node['player'] not in self._players_list:
self._players_list.append(node['player'])
# add parenthood
for parent in node['parents']:
# noinspection PyTypeChecker
self._nodes[parent]['children'][id_] = str(node['parents'][parent])
# set depth to one more than first parent
if node['parents']:
node['depth'] = self._nodes[str(list(node['parents'].keys())[0])]['depth'] + 1
else:
node['depth'] = 0 if node.get('depth') is None else node['depth']
# calculate total probability of node:
# total probability equals sum of probabilities of parents multiplied by probability of node
branch_probability = 0
for parent in node['parents']:
branch_probability += self._nodes[parent]['branch']['probability']
node['branch']['probability'] = branch_probability * node['probability']
# validate against the error of node not being connected to the rest of the tree via parents removal:
        if id_ != 'root' and not node['parents']:
raise ValueError('node [%s] is not connected to the tree - parents are empty' % id_)
# add node
self._nodes[id_] = node
def add_vertex(self, id_: str, player: str, parents: dict):
"""
add vertex from simplified function:
:param str id_: id of the node
:param str player: id of player owning the node
:param dict parents: dictionary of parents for the node
"""
self.add_node({
'id': id_,
'player': player,
'parents': parents
})
def add_leaf(self, id_: str, value: list, parents: dict):
"""
add leaf from simplified function:
:param str id_: id of the node
:param list value: list of node's values
:param dict parents: dictionary of parents for the node
"""
self.add_node({
'id': id_,
'value': value,
'parents': parents
})
def copy_node(self, from_: str, to_: str):
"""
create a copy of node's properties in another node
:param str from_: origin node of properties
:param str to_: destination node for properties
"""
self._nodes[to_] = dict(self._nodes[from_])
def change_node(self, node: dict):
"""
change node method. Changes attributes provided in node dictionary
:param dict node: dictionary of node's data
"""
# check if it is not overriding existing node
if node.get('id') is not None:
if node['id'] not in self._nodes:
raise ValueError('tried to change non-existing node %s' % node['id'])
else:
raise ValueError('no id for node provided')
# change attributes
id_ = node['id']
del node['id']
for attribute in node:
self._nodes[id_][attribute] = node[attribute]
# ---------------------------------- OBJECT BASIC METHODS ----------------------------------------------------------
def get_parent(self, id_) -> str:
""" get id of the parent node """
return list(self._nodes[id_]['parents'].keys())[0]
def get_player_index(self, id_) -> int:
""" return player index from players list order """
return self._players_list.index(self._nodes[id_]['player'])
def get_path_to_node(self, id_: str, mode: str = 'nodes') -> list:
"""
get path from root to the node
:param str id_: id of the node you want to reach from root
:param str mode: mode of return type, 'nodes' - make path with nodes id, 'moves' - make path with player choices
"""
path_t = []
node = id_
        while node != 'root':
if mode == 'nodes':
path_t.insert(0, node)
elif mode == 'moves':
parent_ = self.get_parent(node)
path_t.insert(0, self._nodes[parent_]['children'][node])
else:
raise ValueError('mode variable is not "nodes" nor "moves"')
node = self.get_parent(node)
if mode == 'nodes':
path_t.insert(0, 'root')
return path_t
@staticmethod
def _get_key(obj: dict, val: str) -> list:
"""
get list of keys with specified value from obj dictionary
:param dict obj: chosen dictionary
:param str val: specified value
"""
sublist = [key for (key, value) in obj.items() if value == val]
if sublist:
return sublist
else:
raise ValueError('key with value %s does not exist in %s' % (val, obj))
def get_tree(self) -> dict:
""" return copy of tree nodes structure dict"""
return dict(self._nodes)
# -------------- LEAFS -------------
def calculate_leafs(self):
""" calculate inner list of leafs ids """
self._leafs = [node for node in self._nodes if not self._nodes[node]['children']]
def get_leafs(self) -> list:
""" return list of leafs ids. Will return empty list, if calculate_leafs() has not been called earlier. """
return self._leafs[:]
# -------------- GROUPS ------------
def set_group(self, id_: str, player: str, group: list):
"""
add list of ids to new group
:param str id_: id of group
:param str player: id of player owning the group
:param list group: list of id's you want to create group with
"""
self._groups[id_] = {
'player': player,
'group': group
}
def get_groups(self) -> dict:
""" return dictionary of groups """
return dict(self._groups)
def get_groups_of_player(self, player: str) -> list:
""" return list of all groups id's where player is the owner """
return [group for group in self._groups if self._groups[group]['player'] == player]
# ==================================================================================================================
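# Illustrative usage sketch (not part of the original module): a tiny two-player
# game built only with the public API documented above.
if __name__ == "__main__":
    demo = GameTree()
    demo.add_vertex('A', '2', {'root': 'L'})
    demo.add_leaf('win', [1, -1], {'A': 'l'})
    demo.add_leaf('lose', [-1, 1], {'A': 'r'})
    demo.calculate_leafs()
    print(demo.get_leafs())                        # -> ['win', 'lose']
    print(demo.get_path_to_node('win', 'moves'))   # -> ['L', 'l']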
| 37.909747 | 120 | 0.544805 | [
"MIT"
] | deadsmond/gametree | gametree_lite.py | 10,501 | Python |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pretend
import pytest
import yara
from warehouse.malware.checks.setup_patterns import check as c
from warehouse.malware.models import (
MalwareCheckState,
VerdictClassification,
VerdictConfidence,
)
from .....common.db.malware import MalwareCheckFactory
from .....common.db.packaging import FileFactory
def test_initializes(db_session):
check_model = MalwareCheckFactory.create(
name="SetupPatternCheck", state=MalwareCheckState.Enabled
)
check = c.SetupPatternCheck(db_session)
assert check.id == check_model.id
assert isinstance(check._yara_rules, yara.Rules)
@pytest.mark.parametrize(
("obj", "file_url"), [(None, pretend.stub()), (pretend.stub(), None)]
)
def test_scan_missing_kwargs(db_session, obj, file_url):
MalwareCheckFactory.create(
name="SetupPatternCheck", state=MalwareCheckState.Enabled
)
check = c.SetupPatternCheck(db_session)
with pytest.raises(c.FatalCheckException):
check.scan(obj=obj, file_url=file_url)
def test_scan_non_sdist(db_session):
MalwareCheckFactory.create(
name="SetupPatternCheck", state=MalwareCheckState.Enabled
)
check = c.SetupPatternCheck(db_session)
file = FileFactory.create(packagetype="bdist_wheel")
check.scan(obj=file, file_url=pretend.stub())
assert check._verdicts == []
def test_scan_no_setup_contents(db_session, monkeypatch):
monkeypatch.setattr(
c, "fetch_url_content", pretend.call_recorder(lambda *a: pretend.stub())
)
monkeypatch.setattr(
c, "extract_file_content", pretend.call_recorder(lambda *a: None)
)
MalwareCheckFactory.create(
name="SetupPatternCheck", state=MalwareCheckState.Enabled
)
check = c.SetupPatternCheck(db_session)
file = FileFactory.create(packagetype="sdist")
check.scan(obj=file, file_url=pretend.stub())
assert len(check._verdicts) == 1
assert check._verdicts[0].check_id == check.id
assert check._verdicts[0].file_id == file.id
assert check._verdicts[0].classification == VerdictClassification.Indeterminate
assert check._verdicts[0].confidence == VerdictConfidence.High
assert (
check._verdicts[0].message
== "sdist does not contain a suitable setup.py for analysis"
)
@pytest.mark.parametrize("benign", ["", """from os import path"""])
def test_scan_benign_contents(db_session, monkeypatch, benign):
monkeypatch.setattr(
c, "fetch_url_content", pretend.call_recorder(lambda *a: pretend.stub())
)
monkeypatch.setattr(
c,
"extract_file_content",
pretend.call_recorder(
lambda *a: b"this is a benign string\n" + benign.encode("utf-8")
),
)
MalwareCheckFactory.create(
name="SetupPatternCheck", state=MalwareCheckState.Enabled
)
check = c.SetupPatternCheck(db_session)
file = FileFactory.create(packagetype="sdist")
check.scan(obj=file, file_url=pretend.stub())
assert len(check._verdicts) == 1
assert check._verdicts[0].check_id == check.id
assert check._verdicts[0].file_id == file.id
assert check._verdicts[0].classification == VerdictClassification.Benign
assert check._verdicts[0].confidence == VerdictConfidence.Low
assert check._verdicts[0].message == "No malicious patterns found in setup.py"
@pytest.mark.parametrize(
"malicious, rule",
[
# process_spawn_in_setup
("""os.system('cat /etc/passwd')""", "process_spawn_in_setup"),
("""os.popen('cat /etc/passwd')""", "process_spawn_in_setup"),
("""os.popen3('cat /etc/passwd')""", "process_spawn_in_setup"),
("""os.spawn('cat /etc/passwd')""", "process_spawn_in_setup"),
("""os.spawnve('cat /etc/passwd')""", "process_spawn_in_setup"),
("""os.posix_spawn('cat /etc/passwd')""", "process_spawn_in_setup"),
("""os.posix_spawnp('cat /etc/passwd')""", "process_spawn_in_setup"),
(
"""os.exec('malicious_code')""",
"process_spawn_in_setup:metaprogramming_in_setup",
),
("""os.execve('malicious_code')""", "process_spawn_in_setup"),
(
"""
from os import *
system('cat /etc/passwd')
""",
"process_spawn_in_setup",
),
(
"""
from os import path, system
system('cat /etc/passwd')
""",
"process_spawn_in_setup",
),
(
"""
from os import system
system('cat /etc/passwd')
""",
"process_spawn_in_setup",
),
(
"""
from os import popen
popen('cat /etc/passwd')
""",
"process_spawn_in_setup",
),
(
"""
from os import popen3
popen3('cat /etc/passwd')
""",
"process_spawn_in_setup",
),
(
"""
from os import spawn
spawn('cat /etc/passwd')
""",
"process_spawn_in_setup",
),
(
"""
from os import spawnve
spawnve('cat /etc/passwd')
""",
"process_spawn_in_setup",
),
(
"""
from os import posix_spawn
posix_spawn('cat /etc/passwd')
""",
"process_spawn_in_setup",
),
(
"""
from os import posix_spawnp
posix_spawnp('cat /etc/passwd')
""",
"process_spawn_in_setup",
),
(
"""
from os import exec
exec('cat /etc/passwd')
""",
"process_spawn_in_setup:metaprogramming_in_setup",
),
(
"""
from os import execve
execve('cat /etc/passwd')
""",
"process_spawn_in_setup",
),
# subprocess_in_setup
("subprocess.run('...')", "subprocess_in_setup"),
("subprocess.Popen('...')", "subprocess_in_setup"),
("subprocess.call('...')", "subprocess_in_setup"),
("subprocess.check_call('...')", "subprocess_in_setup"),
("subprocess.check_output('...')", "subprocess_in_setup"),
(
"""
from subprocess import run
run('...')
""",
"subprocess_in_setup",
),
(
"""
from subprocess import Popen
Popen('...')
""",
"subprocess_in_setup",
),
(
"""
from subprocess import call
call('...')
""",
"subprocess_in_setup",
),
(
"""
from subprocess import check_call
check_call('...')
""",
"subprocess_in_setup",
),
(
"""
from subprocess import check_output
check_output('...')
""",
"subprocess_in_setup",
),
# networking_in_setup
("from socket import something", "networking_in_setup"),
("from socket.something import something", "networking_in_setup"),
("import socket", "networking_in_setup"),
("from socketserver import something", "networking_in_setup"),
("from socketserver.something import something", "networking_in_setup"),
("import socketserver", "networking_in_setup"),
("from ssl import something", "networking_in_setup"),
("from ssl.something import something", "networking_in_setup"),
("import ssl", "networking_in_setup"),
("from ftplib import something", "networking_in_setup"),
("from http.something import something", "networking_in_setup"),
("import http", "networking_in_setup"),
("import http", "networking_in_setup"),
("from urllib import something", "networking_in_setup"),
("from urllib.something import something", "networking_in_setup"),
("import urllib", "networking_in_setup"),
("from xmlrpc import something", "networking_in_setup"),
("from xmlrpc.something import something", "networking_in_setup"),
("import xmlrpc", "networking_in_setup"),
# deserialization_in_setup
("from pickle import something", "deserialization_in_setup"),
("from pickle.something import something", "deserialization_in_setup"),
("import pickle", "deserialization_in_setup"),
("from base64 import something", "deserialization_in_setup"),
("from base64.something import something", "deserialization_in_setup"),
("import base64", "deserialization_in_setup"),
("from binhex import something", "deserialization_in_setup"),
("from binhex.something import something", "deserialization_in_setup"),
("import binhex", "deserialization_in_setup"),
# metaprogramming_in_setup
("from inspect import something", "metaprogramming_in_setup"),
("from inspect.something import something", "metaprogramming_in_setup"),
("import inspect", "metaprogramming_in_setup"),
("from compileall import something", "metaprogramming_in_setup"),
("from compileall.something import something", "metaprogramming_in_setup"),
("import compileall", "metaprogramming_in_setup"),
("from py_compile import something", "metaprogramming_in_setup"),
("from py_compile.something import something", "metaprogramming_in_setup"),
("import py_compile", "metaprogramming_in_setup"),
("from builtins import something", "metaprogramming_in_setup"),
("from builtins.something import something", "metaprogramming_in_setup"),
("import builtins", "metaprogramming_in_setup"),
("__builtins__.bla", "metaprogramming_in_setup"),
("from importlib import something", "metaprogramming_in_setup"),
("from importlib.something import something", "metaprogramming_in_setup"),
("import importlib", "metaprogramming_in_setup"),
("__import__('bla')", "metaprogramming_in_setup"),
("from sys import modules, path", "metaprogramming_in_setup"),
("from sys import path, modules", "metaprogramming_in_setup"),
("import sys.modules", "metaprogramming_in_setup"),
("compile('malicious')", "metaprogramming_in_setup"),
("dir(someobject)", "metaprogramming_in_setup"),
("someobject.__dir__()", "metaprogramming_in_setup"),
("eval('malicious')", "metaprogramming_in_setup"),
("exec('malicious')", "metaprogramming_in_setup"),
("getattr(someobject, 'attr')", "metaprogramming_in_setup"),
("vars(someobject)", "metaprogramming_in_setup"),
("someobject.__dict__()", "metaprogramming_in_setup"),
("globals()", "metaprogramming_in_setup"),
("locals()", "metaprogramming_in_setup"),
("chr(42)", "metaprogramming_in_setup"),
("ord('x')", "metaprogramming_in_setup"),
# alias imports
(
"""
import os as evil
evil.system('cat /etc/passwd')
""",
"process_spawn_in_setup",
),
(
"""
import os
evil = os
evil.system('cat /etc/passwd')
""",
"process_spawn_in_setup",
),
(
"""
import os
hof = lambda evil, scary : evil(scary)
hof(os.system, "cat /etc/passwd")
""",
"process_spawn_in_setup",
),
(
"""
import os as evil
evil.popen('cat /etc/passwd')
""",
"process_spawn_in_setup",
),
(
"""
import os
evil = os
evil.popen('cat /etc/passwd')
""",
"process_spawn_in_setup",
),
(
"""
import os as evil
evil.popen3('cat /etc/passwd')
""",
"process_spawn_in_setup",
),
(
"""
import os
evil = os
evil.popen3('cat /etc/passwd')
""",
"process_spawn_in_setup",
),
(
"""
import os as evil
evil.spawn('cat /etc/passwd')
""",
"process_spawn_in_setup",
),
(
"""
import os
evil = os
evil.spawn('cat /etc/passwd')
""",
"process_spawn_in_setup",
),
(
"""
import os as evil
evil.spawnve('cat /etc/passwd')
""",
"process_spawn_in_setup",
),
(
"""
import os
evil = os
evil.spawnve('cat /etc/passwd')
""",
"process_spawn_in_setup",
),
(
"""
import os as evil
evil.posix_spawn('cat /etc/passwd')
""",
"process_spawn_in_setup",
),
(
"""
import os
evil = os
evil.posix_spawn('cat /etc/passwd')
""",
"process_spawn_in_setup",
),
(
"""
import os as evil
evil.posix_spawnp('cat /etc/passwd')
""",
"process_spawn_in_setup",
),
(
"""
import os
evil = os
evil.posix_spawnp('cat /etc/passwd')
""",
"process_spawn_in_setup",
),
(
"""
import os as evil
evil.exec('malicious_code')
""",
"process_spawn_in_setup:metaprogramming_in_setup",
),
(
"""
import os
evil = os
evil.exec('malicious_code')
""",
"process_spawn_in_setup:metaprogramming_in_setup",
),
(
"""
import os as evil
evil.execve('malicious_code')
""",
"process_spawn_in_setup",
),
(
"""
import os
evil = os
evil.execve('malicious_code')
""",
"process_spawn_in_setup",
),
(
"""
import subprocess as evil
evil.run('...')
""",
"subprocess_in_setup",
),
(
"""
import subprocess
evil = subprocess
evil.run('...')
""",
"subprocess_in_setup",
),
(
"""
import subprocess as evil
evil.Popen('...')
""",
"subprocess_in_setup",
),
(
"""
import subprocess
evil = subprocess
evil.Popen('...')
""",
"subprocess_in_setup",
),
(
"""
import subprocess as evil
evil.call('...')
""",
"subprocess_in_setup",
),
(
"""
import subprocess
evil = subprocess
evil.call('...')
""",
"subprocess_in_setup",
),
(
"""
import subprocess as evil
evil.check_call('...')
""",
"subprocess_in_setup",
),
(
"""
import subprocess
evil = subprocess
evil.check_call('...')
""",
"subprocess_in_setup",
),
(
"""
import subprocess as evil
evil.check_output('...')
""",
"subprocess_in_setup",
),
(
"""
import subprocess
evil = subprocess
evil.check_output('...')
""",
"subprocess_in_setup",
),
# higher order functions, used for metaprogramming in setup.py
(
"""
hof = lambda evil : evil('malicious')
hof(compile)
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil, scary : evil(scary)
hof(compile, 'malicious')
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil, scary : scary(evil)
hof('malicious', compile)
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil, scary : scary(evil)
hof('malicious',compile)
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : evil[0]('malicious')
hof([compile])
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil, scary : evil[0](scary)
hof([compile], 'malicious')
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil, scary : scary[0](evil)
hof('malicious', [compile])
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : evil['mean']('malicious')
hof({'mean' : compile})
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : evil['mean']('malicious')
hof({'mean':compile})
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : list(evil.keys())[0]('malicious')
hof({compile:'bad'})
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : list(evil.keys())[0]('malicious')
hof({compile : 'bad'})
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : evil(someobject)
hof(dir)
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil, scary : evil(scary)
hof(dir, someobject)
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil, scary : scary(evil)
hof(someobject, dir)
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil, scary : scary(evil)
hof(someobject,dir)
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : evil[0](someobject)
hof([dir])
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil, scary : evil[0](scary)
hof([dir], someobject)
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil, scary : scary[0](evil)
hof(someobject, [dir])
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : evil['mean'](someobject)
hof({'mean' : dir})
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : evil['mean'](someobject)
hof({'mean':dir})
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : list(evil.keys())[0](someobject)
hof({dir:'bad'})
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : list(evil.keys())[0](someobject)
hof({dir : 'bad'})
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : evil('malicious')
hof(eval)
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil, scary : evil(scary)
hof(eval, 'malicious')
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil, scary : scary(evil)
hof('malicious', eval)
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil, scary : scary(evil)
hof('malicious',eval)
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : evil[0]('malicious')
hof([eval])
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil, scary : evil[0](scary)
hof([eval], 'malicious')
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil, scary : scary[0](evil)
hof('malicious', [eval])
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : evil['mean']('malicious')
hof({'mean' : eval})
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : evil['mean']('malicious')
hof({'mean':eval})
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : list(evil.keys())[0]('malicious')
hof({eval:'bad'})
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : list(evil.keys())[0]('malicious')
hof({eval : 'bad'})
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : evil('malicious')
hof(exec)
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil, scary : evil(scary)
hof(exec, 'malicious')
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil, scary : scary(evil)
hof('malicious', exec)
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil, scary : scary(evil)
hof('malicious',exec)
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : evil[0]('malicious')
hof([exec])
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil, scary : evil[0](scary)
hof([exec], 'malicious')
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil, scary : scary[0](evil)
hof('malicious', [exec])
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : evil['mean']('malicious')
hof({'mean' : exec})
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : evil['mean']('malicious')
hof({'mean':exec})
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : list(evil.keys())[0]('malicious')
hof({exec:'bad'})
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : list(evil.keys())[0]('malicious')
hof({exec: 'bad'})
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : evil(someobject, 'attr')
hof(getattr)
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil, scary, unkind : evil(scary, unkind)
hof(getattr, someobject, 'attr')
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda unkind, evil, scary : scary(evil, unkind)
hof('attr', someobject, getattr)
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda unkind, evil, scary : scary(evil, unkind)
hof('attr', someobject,getattr)
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : evil[0](someobject, 'attr')
hof([getattr])
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil, scary, unkind : evil[0](scary, unkind)
hof([getattr], someobject, 'attr')
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda unkind, evil, scary : scary[0](evil, unkind)
hof('attr', someobject, [getattr])
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : evil['mean'](someobject, 'attr')
hof({'mean' : getattr})
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : evil['mean'](someobject, 'attr')
hof({'mean':getattr})
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : list(evil.keys())[0](someobject, 'attr')
hof({getattr:'bad'})
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : list(evil.keys())[0](someobject, 'attr')
hof({getattr : 'bad'})
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : evil(someobject)
hof(vars)
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil, scary : evil(scary)
hof(vars, someobject)
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil, scary : scary(evil)
hof(someobject, vars)
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil, scary : scary(evil)
hof(someobject,vars)
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : evil[0](someobject)
hof([vars])
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil, scary : evil[0](scary)
hof([vars], someobject)
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil, scary : scary[0](evil)
hof(someobject, [vars])
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : evil['mean'](someobject)
hof({'mean' : vars})
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : evil['mean'](someobject)
hof({'mean':vars})
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : list(evil.keys())[0](someobject)
hof({vars:'bad'})
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : list(evil.keys())[0](someobject)
hof({vars : 'bad'})
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : evil()
hof(globals)
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil, _scary : evil()
hof(globals, None)
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda _evil, scary : scary()
hof(None, globals)
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda _evil, scary : scary()
hof(None,globals)
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : evil[0]()
hof([globals])
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : evil['mean']()
hof({'mean' : globals})
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : evil['mean']()
hof({'mean':globals})
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : list(evil.keys())[0]()
hof({globals:'bad'})
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : list(evil.keys())[0]()
hof({globals : 'bad'})
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : evil()
hof(locals)
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil, _scary : evil()
hof(locals, None)
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda _evil, scary : scary()
hof(None, locals)
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda _evil, scary : scary()
hof(None,locals)
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : evil[0]()
hof([locals])
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : evil['mean']()
hof({'mean' : locals})
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : evil['mean']()
hof({'mean':locals})
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : list(evil.keys())[0]()
hof({locals:'bad'})
""",
"metaprogramming_in_setup",
),
(
"""
hof = lambda evil : list(evil.keys())[0]()
hof({locals : 'bad'})
""",
"metaprogramming_in_setup",
),
],
)
def test_scan_matched_content(db_session, monkeypatch, malicious, rule):
monkeypatch.setattr(
c, "fetch_url_content", pretend.call_recorder(lambda *a: pretend.stub())
)
monkeypatch.setattr(
c,
"extract_file_content",
pretend.call_recorder(
lambda *a: b"this looks suspicious:\n" + malicious.encode("utf-8")
),
)
MalwareCheckFactory.create(
name="SetupPatternCheck", state=MalwareCheckState.Enabled
)
check = c.SetupPatternCheck(db_session)
file = FileFactory.create(packagetype="sdist")
check.scan(obj=file, file_url=pretend.stub())
assert len(check._verdicts) == 1
assert check._verdicts[0].check_id == check.id
assert check._verdicts[0].file_id == file.id
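    # Process/subprocess spawning is treated as an outright Threat by the check;
    # any other matched rule only produces an Indeterminate classification.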
threat_rules = {"process_spawn_in_setup", "subprocess_in_setup"}
if set(rule.split(":")) & threat_rules:
assert check._verdicts[0].classification == VerdictClassification.Threat
else:
assert check._verdicts[0].classification == VerdictClassification.Indeterminate
assert check._verdicts[0].confidence == VerdictConfidence.High
assert check._verdicts[0].message == rule
| 28.699741 | 87 | 0.460151 | [
"Apache-2.0"
] | 001101/warehouse | tests/unit/malware/checks/setup_patterns/test_check.py | 33,263 | Python |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class RNanotime(RPackage):
"""Nanosecond-Resolution Time Support for R.
Full 64-bit resolution date and time functionality with; nanosecond
granularity is provided, with easy transition to and from; the standard
'POSIXct' type. Three additional classes offer interval,; period and
duration functionality for nanosecond-resolution timestamps."""
cran = "nanotime"
version('0.3.6', sha256='df751a5cb11ca9ac8762cd1e33bc73e7d20fde9339d2c46bc6f85873388568df')
version('0.3.5', sha256='44deaae58452bacea4855d018212593811401c2afc460ffb11905479013923a0')
version('0.3.2', sha256='9ef53c3bca01b605a9519190117988e170e63865327007c90b05d31fe7f22b1d')
version('0.2.4', sha256='2dfb7e7435fec59634b87563a215467e7793e2711e302749c0533901c74eb184')
version('0.2.3', sha256='7d6df69a4223ae154f610b650e24ece38ce4aa706edfa38bec27d15473229f5d')
version('0.2.0', sha256='9ce420707dc4f0cb4241763579b849d842904a3aa0d88de8ffef334d08fa188d')
depends_on('r-bit64', type=('build', 'run'))
depends_on('r-rcppcctz@0.2.3:', type=('build', 'run'))
depends_on('r-rcppcctz@0.2.9:', type=('build', 'run'), when='@0.3.2:')
depends_on('r-zoo', type=('build', 'run'))
depends_on('r-rcpp', type=('build', 'run'), when='@0.3.2:')
depends_on('r-rcppdate', type=('build', 'run'), when='@0.3.2:')
| 48.65625 | 95 | 0.739242 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | jmellorcrummey/spack | var/spack/repos/builtin/packages/r-nanotime/package.py | 1,557 | Python |
#!/usr/bin/env python
"""This is the GRR frontend HTTP Server."""
import BaseHTTPServer
import cgi
import cStringIO
from multiprocessing import freeze_support
from multiprocessing import Process
import pdb
import socket
import SocketServer
import threading
import ipaddr
import logging
# pylint: disable=unused-import,g-bad-import-order
from grr.lib import server_plugins
# pylint: enable=g-bad-import-order
from grr.lib import communicator
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import flow
from grr.lib import master
from grr.lib import rdfvalue
from grr.lib import startup
from grr.lib import stats
from grr.lib import type_info
from grr.lib import utils
# pylint: disable=g-bad-name
class GRRHTTPServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""GRR HTTP handler for receiving client posts."""
statustext = {200: "200 OK",
406: "406 Not Acceptable",
500: "500 Internal Server Error"}
active_counter_lock = threading.Lock()
active_counter = 0
def Send(self, data, status=200, ctype="application/octet-stream",
last_modified=0):
self.wfile.write(("HTTP/1.0 %s\r\n"
"Server: BaseHTTP/0.3 Python/2.6.5\r\n"
"Content-type: %s\r\n"
"Content-Length: %d\r\n"
"Last-Modified: %s\r\n"
"\r\n"
"%s") %
(self.statustext[status], ctype, len(data),
self.date_time_string(last_modified), data))
def do_GET(self):
"""Server the server pem with GET requests."""
if self.path.startswith("/server.pem"):
self.ServerPem()
def ServerPem(self):
self.Send(self.server.server_cert)
RECV_BLOCK_SIZE = 8192
def _GetPOSTData(self, length):
# During our tests we have encountered some issue with the socket library
# that would stall for a long time when calling socket.recv(n) with a large
# n. rfile.read() passes the length down to socket.recv() so it's much
# faster to read the data in small 8k chunks.
input_data = cStringIO.StringIO()
while length >= 0:
read_size = min(self.RECV_BLOCK_SIZE, length)
data = self.rfile.read(read_size)
if not data:
break
input_data.write(data)
length -= len(data)
return input_data.getvalue()
def do_POST(self):
"""Process encrypted message bundles."""
self.Control()
@stats.Counted("frontend_request_count", fields=["http"])
@stats.Timed("frontend_request_latency", fields=["http"])
def Control(self):
"""Handle POSTS."""
if not master.MASTER_WATCHER.IsMaster():
# We shouldn't be getting requests from the client unless we
# are the active instance.
stats.STATS.IncrementCounter("frontend_inactive_request_count",
fields=["http"])
logging.info("Request sent to inactive frontend from %s",
self.client_address[0])
# Get the api version
try:
api_version = int(cgi.parse_qs(self.path.split("?")[1])["api"][0])
except (ValueError, KeyError, IndexError):
# The oldest api version we support if not specified.
api_version = 3
with GRRHTTPServerHandler.active_counter_lock:
GRRHTTPServerHandler.active_counter += 1
stats.STATS.SetGaugeValue("frontend_active_count", self.active_counter,
fields=["http"])
try:
length = int(self.headers.getheader("content-length"))
request_comms = rdfvalue.ClientCommunication(self._GetPOSTData(length))
# If the client did not supply the version in the protobuf we use the get
# parameter.
if not request_comms.api_version:
request_comms.api_version = api_version
# Reply using the same version we were requested with.
responses_comms = rdfvalue.ClientCommunication(
api_version=request_comms.api_version)
source_ip = ipaddr.IPAddress(self.client_address[0])
if source_ip.version == 6:
source_ip = source_ip.ipv4_mapped or source_ip
request_comms.orig_request = rdfvalue.HttpRequest(
raw_headers=utils.SmartStr(self.headers),
source_ip=utils.SmartStr(source_ip))
source, nr_messages = self.server.frontend.HandleMessageBundles(
request_comms, responses_comms)
logging.info("HTTP request from %s (%s), %d bytes - %d messages received,"
" %d messages sent.",
source, utils.SmartStr(source_ip), length, nr_messages,
responses_comms.num_messages)
self.Send(responses_comms.SerializeToString())
except communicator.UnknownClientCert:
# "406 Not Acceptable: The server can only generate a response that is not
# accepted by the client". This is because we can not encrypt for the
# client appropriately.
self.Send("Enrollment required", status=406)
except Exception as e:
if flags.FLAGS.debug:
pdb.post_mortem()
logging.error("Had to respond with status 500: %s.", e)
self.Send("Error", status=500)
finally:
with GRRHTTPServerHandler.active_counter_lock:
GRRHTTPServerHandler.active_counter -= 1
stats.STATS.SetGaugeValue("frontend_active_count", self.active_counter,
fields=["http"])
class GRRHTTPServer(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
"""The GRR HTTP frontend server."""
allow_reuse_address = True
request_queue_size = 500
address_family = socket.AF_INET6
def __init__(self, server_address, handler, frontend=None, *args, **kwargs):
stats.STATS.SetGaugeValue("frontend_max_active_count",
self.request_queue_size)
if frontend:
self.frontend = frontend
else:
self.frontend = flow.FrontEndServer(
certificate=config_lib.CONFIG["Frontend.certificate"],
private_key=config_lib.CONFIG["PrivateKeys.server_key"],
max_queue_size=config_lib.CONFIG["Frontend.max_queue_size"],
message_expiry_time=config_lib.CONFIG["Frontend.message_expiry_time"],
max_retransmission_time=config_lib.CONFIG[
"Frontend.max_retransmission_time"])
self.server_cert = config_lib.CONFIG["Frontend.certificate"]
(address, _) = server_address
version = ipaddr.IPAddress(address).version
if version == 4:
self.address_family = socket.AF_INET
elif version == 6:
self.address_family = socket.AF_INET6
logging.info("Will attempt to listen on %s", server_address)
BaseHTTPServer.HTTPServer.__init__(self, server_address, handler, *args,
**kwargs)
def CreateServer(frontend=None):
server_address = (config_lib.CONFIG["Frontend.bind_address"],
config_lib.CONFIG["Frontend.bind_port"])
httpd = GRRHTTPServer(server_address, GRRHTTPServerHandler, frontend=frontend)
sa = httpd.socket.getsockname()
logging.info("Serving HTTP on %s port %d ...", sa[0], sa[1])
return httpd
def Serve(server):
try:
server.serve_forever()
except KeyboardInterrupt:
pass
def main(unused_argv):
"""Main."""
config_lib.CONFIG.AddContext("HTTPServer Context")
startup.Init()
httpd = CreateServer()
if config_lib.CONFIG["Frontend.processes"] > 1:
# Multiprocessing
for _ in range(config_lib.CONFIG["Frontend.processes"] - 1):
Process(target=Serve, args=(httpd,)).start()
try:
httpd.serve_forever()
except KeyboardInterrupt:
print "Caught keyboard interrupt, stopping"
if __name__ == "__main__":
freeze_support()
flags.StartMain(main)
| 31.814815 | 80 | 0.669642 | [
"Apache-2.0"
] | strcrzy/grr | tools/http_server.py | 7,731 | Python |
import re
from typing import Set, Any, List
from sequal.amino_acid import AminoAcid
from sequal.modification import Modification, ModificationMap
from copy import deepcopy
import itertools
from json import dumps
mod_pattern = re.compile(r"[\(|\[]+([^\)]+)[\)|\]]+")
mod_enclosure_start = {"(", "[", "{"}
mod_enclosure_end = {")", "]", "}"}
class Sequence:
seq: List[Any]
def __init__(self, seq, encoder=AminoAcid, mods=None, parse=True, parser_ignore=None, mod_position="right"):
"""
:param mod_position
Indicate the position of the modifications relative to the base block it is supposed to modify
:type mod_position: str
:param mods
Dictionary whose keys are the positions within the sequence and values are array of modifications at those
positions
:type mods: dict
:param encoder
Class for encoding of sequence.
:type encoder: BaseBlock
:param seq
String or array of strings or array of AminoAcid objects. The parser will recursively look over each string at
deepest level and identify individual modifications or amino acids for processing
:type seq: iterable
Python iterable where the deepest level is a string
"""
if type(seq) is not Sequence:
if not mods:
self.mods = {}
else:
self.mods = mods
self.encoder = encoder
if not parser_ignore:
self.parser_ignore = []
else:
self.parser_ignore = parser_ignore
self.seq = []
current_mod = []
current_position = 0
if parse:
self.sequence_parse(current_mod, current_position, mod_position, mods, seq)
else:
for k in seq.__dict__:
if k != "mods":
setattr(self, k, deepcopy(seq.__dict__[k]))
self.seq_length = len(self.seq)
def __getitem__(self, key):
return self.seq[key]
def __len__(self):
return self.seq_length
def __repr__(self):
a = ""
for i in self.seq:
a += str(i)
return a
def __str__(self):
a = ""
for i in self.seq:
a += str(i)
return a
def sequence_parse(self, current_mod, current_position, mod_position, mods, seq):
"""
:param seq: sequence input
:param mods: external modification input
:param mod_position: modification position relative to the modified residue
:param current_position: current iterating amino acid position from the input sequence
:type current_mod: List[Modification]
"""
for b, m in self.__load_sequence_iter(iter(seq)):
if not m:
if mod_position == "left":
if type(b) == AminoAcid:
current_unit = b
current_unit.position = current_position
else:
current_unit = self.encoder(b, current_position)
if current_mod and not mods:
for i in current_mod:
current_unit.set_modification(i)
elif current_position in self.mods and current_unit:
if type(self.mods[current_position]) == Modification:
current_unit.set_modification(self.mods[current_position])
else:
for mod in self.mods[current_position]:
current_unit.set_modification(mod)
self.seq.append(deepcopy(current_unit))
current_mod = []
if mod_position == "right":
if current_mod and not mods:
for i in current_mod:
self.seq[current_position - 1].set_modification(i)
if type(b) == AminoAcid:
current_unit = b
current_unit.position = current_position
else:
current_unit = self.encoder(b, current_position)
if current_position in self.mods and current_unit:
if type(self.mods[current_position]) == Modification:
current_unit.set_modification(self.mods[current_position])
else:
for mod in self.mods[current_position]:
current_unit.set_modification(mod)
self.seq.append(deepcopy(current_unit))
current_mod = []
current_position += 1
else:
if not mods:
current_mod.append(Modification(b[1:-1]))
def __load_sequence_iter(self, seq=None, iter_seq=None):
mod_open = 0
block = ""
mod = False
if not iter_seq:
iter_seq = iter(seq)
for i in iter_seq:
if type(i) == str:
if i in mod_enclosure_start:
mod = True
mod_open += 1
elif i in mod_enclosure_end:
mod_open -= 1
block += i
elif type(i) == AminoAcid:
block = i
else:
yield from self.__load_sequence_iter(iter_seq=iter_seq)
if mod_open == 0:
yield (block, mod)
mod = False
block = ""
def __iter__(self):
self.current_iter_count = 0
return self
def __next__(self):
if self.current_iter_count == self.seq_length:
raise StopIteration
result = self.seq[self.current_iter_count]
self.current_iter_count += 1
return result
def add_modifications(self, mod_dict):
for aa in self.seq:
if aa.position in mod_dict:
for mod in mod_dict[aa.position]:
aa.set_modification(mod)
def to_stripped_string(self):
"""
Return string of the sequence without any modification annotation
:return: str
"""
seq = ""
for i in self.seq:
seq += i.value
return seq
def to_string_customize(self, data, annotation_placement="right", block_separator="", annotation_enclose_characters=("[", "]"),
individual_annotation_enclose=False, individual_annotation_enclose_characters=("[", "]"),
individual_annotation_separator=""):
"""
:rtype: str
:param data: a dictionary where the key is the index position of the amino acid residue and the value is a
iterable where containing the item needed to be included into the sequence.
:param annotation_placement: whether the information should be included on the right of the left of the residue
:param block_separator: separator between each block of annotation information to be included
:param annotation_enclose_characters: enclosure characters for each annotation cluster
:param individual_annotation_enclose: whether or not each individual annotation should be enclosed
:param individual_annotation_enclose_characters: enclosure characters for each individual annotation
:param individual_annotation_separator: separator for each individual annotation
:return:
"""
assert annotation_placement in {"left", "right"}
seq = []
for i in range(len(self.seq)):
seq.append(self.seq[i].value)
if i in data:
annotation = []
if individual_annotation_enclose:
for v in data[i]:
annotation.append("{}{}{}".format(individual_annotation_enclose_characters[0], v, individual_annotation_enclose_characters[1]))
else:
annotation = data[i]
if type(annotation) == str:
ann = annotation
else:
ann = individual_annotation_separator.join(annotation)
if annotation_enclose_characters:
seq.append("{}{}{}".format(annotation_enclose_characters[0], ann, annotation_enclose_characters[1]))
else:
seq.append(individual_annotation_separator.join(ann))
return block_separator.join(seq)
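    # Illustrative parsing example (assuming the Modification class imported above):
    # Sequence("AC[Phospho]DE") produces AminoAcid blocks A, C, D, E with a
    # "Phospho" modification attached to C, since the default mod_position="right"
    # attaches a bracketed modification to the residue immediately before it.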
def count_unique_elements(seq):
elements = {}
for i in seq:
if i.value not in elements:
elements[i.value] = 0
elements[i.value] += 1
if i.mods:
for m in i.mods:
if m.value not in elements:
elements[m.value] = 0
elements[m.value] += 1
return elements
def variable_position_placement_generator(positions):
"""
Use itertools.product to generate a list of tuple with different number of 0 and 1. The length of the tuple is the
length of the input positions.
Using itertools.compress, for each output from itertools.product pairing with input positions, we generate a list of
positions where only those with the same index as 1 would be yielded.
:param positions: list of all identified positions for the modification on the sequence
"""
for i in itertools.product([0, 1], repeat=len(positions)):
yield list(itertools.compress(positions, i))
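# Worked example for variable_position_placement_generator (illustrative):
# for positions [2, 5] it yields [], [5], [2] and [2, 5], i.e. every possible
# subset of the candidate positions.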
def ordered_serialize_position_dict(positions):
return dumps(positions, sort_keys=True, default=str)
class ModdedSequenceGenerator:
used_scenarios_set: Set[str]
def __init__(self, seq, variable_mods=None, static_mods=None, used_scenarios=None, parse_mod_position=True, mod_position_dict=None, ignore_position=None):
"""
Generator for creating modified sequences.
:type used_scenarios: set
:type static_mods: List[Modification]
:type variable_mods: List[Modification]
:type seq: str
"""
self.seq = seq
if static_mods:
self.static_mods = static_mods
self.static_map = ModificationMap(seq, static_mods, parse_position=parse_mod_position, mod_position_dict=mod_position_dict)
self.static_mod_position_dict = self.static_mod_generate()
else:
self.static_mod_position_dict = {}
if ignore_position:
self.ignore_position = ignore_position
else:
self.ignore_position = set()
for i in self.static_mod_position_dict:
self.ignore_position.add(i)
if variable_mods:
self.variable_mods = variable_mods
if self.static_mod_position_dict:
self.variable_map = ModificationMap(seq, variable_mods, ignore_positions=self.ignore_position, parse_position=parse_mod_position, mod_position_dict=mod_position_dict)
else:
self.variable_map = ModificationMap(seq, variable_mods)
self.variable_mod_number = len(variable_mods)
else:
self.variable_mods = None
self.variable_map_scenarios = {}
if used_scenarios:
self.used_scenarios_set = used_scenarios
else:
self.used_scenarios_set = set()
def generate(self):
if self.variable_mods:
self.variable_mod_generate_scenarios()
for i in self.explore_scenarios():
a = dict(self.static_mod_position_dict)
a.update(i)
serialized_a = ordered_serialize_position_dict(a)
if serialized_a not in self.used_scenarios_set:
self.used_scenarios_set.add(serialized_a)
yield a
else:
serialized_a = ordered_serialize_position_dict(self.static_mod_position_dict)
if serialized_a not in self.used_scenarios_set:
yield self.static_mod_position_dict
def static_mod_generate(self):
position_dict = {}
for m in self.static_mods:
for pm in self.static_map.get_mod_positions(str(m)):
if pm not in position_dict:
position_dict[pm] = []
position_dict[pm].append(m)
return position_dict
def variable_mod_generate_scenarios(self):
"""
Recursively generating all possible position compositions for each variable modification and add them to
self.variable_map_scenarios dictionary where key is the value attr of the modification while the value is the
position list
"""
for i in self.variable_mods:
positions = self.variable_map.get_mod_positions(str(i))
if i.value not in self.variable_map_scenarios:
if not i.all_fill:
self.variable_map_scenarios[i.value] = list(
variable_position_placement_generator(positions))
else:
self.variable_map_scenarios[i.value] = [[], positions]
def explore_scenarios(self, current_mod=0, mod=None):
if mod is None:
mod = {}
for pos in self.variable_map_scenarios[self.variable_mods[current_mod].value]:
temp_dict = deepcopy(mod)
if pos:
for p in pos:
if p not in temp_dict:
temp_dict[p] = [self.variable_mods[current_mod]]
if current_mod != self.variable_mod_number - 1:
yield from self.explore_scenarios(current_mod + 1, temp_dict)
else:
yield temp_dict
else:
if current_mod != self.variable_mod_number - 1:
yield from self.explore_scenarios(current_mod + 1, temp_dict)
else:
yield temp_dict
| 39.637394 | 182 | 0.583905 | [
"MIT"
] | bschulzlab/dialib_standalone | sequal/sequence.py | 13,992 | Python |
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 5 17:37:31 2017
@author: Flame
"""
from TuringMachine import Rule, Q, Move, TuringMachine, Tape
from TuringMachine import EMTY_SYMBOL as empty
def check(input_str):
rules= \
[
        Rule(Q(1),'1',Q(1),'1', Move.Right),  # bring the input to its initial form
        Rule(Q(1),'0',Q(1),'0', Move.Right),
        Rule(Q(1),',',Q(2),'#', Move.Right),
        Rule(Q(2),' ',Q(2),'#', Move.Right),
        Rule(Q(2),'#',Q(2),'#', Move.Left),
        Rule(Q(2),'*',Q(6),'*', Move.Left),
        Rule(Q(2),'1',Q(3),'#', Move.Left),  # operations on the strings
        Rule(Q(3),'#',Q(3),'#', Move.Left),
        Rule(Q(3),'1',Q(4),'0', Move.Right),  # found a one, so subtract it
        Rule(Q(3),'0',Q(3),'1', Move.Left),  # found a zero, so add a one and go subtract a one from the next digit
        Rule(Q(4),'0',Q(4),'0', Move.Right),  # move right to find the separator
        Rule(Q(4),'1',Q(4),'1', Move.Right),
        Rule(Q(4),'#',Q(5),'#', Move.Right),  # move right to find a one
        Rule(Q(5),'#',Q(5),'#', Move.Right),
        Rule(Q(5),'1',Q(3),'#', Move.Left),
        Rule(Q(5),empty,Q(6),empty, Move.Left),
        Rule(Q(6),'#',Q(6),'*', Move.Left),
        Rule(Q(6),'0',Q(6),'*', Move.Left),
        Rule(Q(6),empty,Q(10),empty, Move.Stay),  # the string is valid, go to the final state
]
TM = TuringMachine(rules, Q(1), Q(10))
print(TM)
print( "Right" if TM.check(Tape(input_str)) else "Wrong")
| 38.9 | 128 | 0.535347 | [
"MIT"
] | KarpenkoDenis/TuringMachine | main.py | 1,808 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SecretBundle(Model):
"""A secret consisting of a value, id and its attributes.
Variables are only populated by the server, and will be ignored when
sending a request.
:param value: The secret value.
:type value: str
:param id: The secret id.
:type id: str
:param content_type: The content type of the secret.
:type content_type: str
:param attributes: The secret management attributes.
:type attributes: ~azure.keyvault.v2016_10_01.models.SecretAttributes
:param tags: Application specific metadata in the form of key-value pairs.
:type tags: dict[str, str]
:ivar kid: If this is a secret backing a KV certificate, then this field
specifies the corresponding key backing the KV certificate.
:vartype kid: str
:ivar managed: True if the secret's lifetime is managed by key vault. If
this is a secret backing a certificate, then managed will be true.
:vartype managed: bool
"""
_validation = {
'kid': {'readonly': True},
'managed': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'id': {'key': 'id', 'type': 'str'},
'content_type': {'key': 'contentType', 'type': 'str'},
'attributes': {'key': 'attributes', 'type': 'SecretAttributes'},
'tags': {'key': 'tags', 'type': '{str}'},
'kid': {'key': 'kid', 'type': 'str'},
'managed': {'key': 'managed', 'type': 'bool'},
}
def __init__(self, **kwargs):
super(SecretBundle, self).__init__(**kwargs)
self.value = kwargs.get('value', None)
self.id = kwargs.get('id', None)
self.content_type = kwargs.get('content_type', None)
self.attributes = kwargs.get('attributes', None)
self.tags = kwargs.get('tags', None)
self.kid = None
self.managed = None
| 37.84127 | 78 | 0.599832 | [
"MIT"
] | JonathanGailliez/azure-sdk-for-python | azure-keyvault/azure/keyvault/v2016_10_01/models/secret_bundle.py | 2,384 | Python |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import traceback
from flask import current_app, render_template
from flask import make_response
from flask.json import jsonify
from ooniapi.auth import auth_blueprint
from ooniapi.citizenlab import cz_blueprint
from ooniapi.private import api_private_blueprint
from ooniapi.measurements import api_msm_blueprint
from ooniapi.pages import pages_blueprint
from ooniapi.probe_services import probe_services_blueprint
from ooniapi.prio import prio_bp
HERE = os.path.abspath(os.path.dirname(__file__))
#def render_problem_exception(exception):
# response = exception.to_problem()
# return FlaskApi.get_response(response)
# def render_generic_exception(exception):
# if not isinstance(exception, werkzeug.exceptions.HTTPException):
# exc_name = "{}.{}".format(type(exception).__module__, type(exception).__name__)
# exc_desc = str(exception)
# if hasattr(exception, "__traceback__"):
# current_app.logger.error(
# "".join(traceback.format_tb(exception.__traceback__))
# )
# current_app.logger.error(
# "Unhandled error occurred, {}: {}".format(exc_name, exc_desc)
# )
# exception = werkzeug.exceptions.InternalServerError(
# description="An unhandled application error occurred: {}".format(exc_name)
# )
#
# response = problem(
# title=exception.name, detail=exception.description, status=exception.code
# )
# return FlaskApi.get_response(response)
def render_generic_exception(exception):
"""Log a traceback and return code 500 with a simple JSON
The CORS header is set as usual. Without this, an error could lead to browsers
caching a response without the correct CORS header.
"""
# TODO: render_template 500.html instead?
current_app.logger.error(f"Exception: {exception}")
current_app.logger.error(
"".join(traceback.format_tb(exception.__traceback__))
)
try:
return make_response(jsonify(error=str(exception)), 500)
except:
return make_response("unhandled error", 500)
def page_not_found(e):
return render_template("404.html"), 404
def bad_request(e):
return render_template("400.html", exception=e), 400
def register(app):
#app.register_blueprint(api_docs_blueprint, url_prefix="/api")
# Measurements API:
app.register_blueprint(api_msm_blueprint, url_prefix="/api")
#app.register_blueprint(connexion_api.blueprint)
app.register_blueprint(auth_blueprint, url_prefix="")
app.register_blueprint(cz_blueprint, url_prefix="")
# Private API
app.register_blueprint(api_private_blueprint, url_prefix="/api/_")
# The index is here:
app.register_blueprint(pages_blueprint, url_prefix="")
# Probe services
app.register_blueprint(probe_services_blueprint, url_prefix="")
app.register_blueprint(prio_bp, url_prefix="")
if "PYTEST_CURRENT_TEST" not in os.environ:
app.register_error_handler(Exception, render_generic_exception)
app.errorhandler(404)(page_not_found)
app.errorhandler(400)(bad_request)
| 33.204082 | 88 | 0.734788 | [
"BSD-3-Clause"
] | hellais/ooni-measurements | newapi/ooniapi/views.py | 3,254 | Python |
from .elektra5585 import Elektra5585, Elektra5585Line
| 27 | 53 | 0.87037 | [
"Apache-2.0"
] | CiscoDevNet/firepower-kickstart | kick/device2/elektra5585/actions/__init__.py | 54 | Python |
import os
import sys
import subprocess
import argparse
CondylesFeaturesExtractor = "/Users/prisgdd/Documents/Projects/CNN/CondylesFeaturesExtractor-build/src/bin/condylesfeaturesextractor"
parser = argparse.ArgumentParser()
parser.add_argument('-meshDir', action='store', dest='meshDir', help='Input file to classify',
default = "/Users/prisgdd/Desktop/TestPipeline/inputGroups/Mesh")
parser.add_argument('-outputDir', action='store', dest='outputDir', help='Directory for output files',
default="/Users/prisgdd/Desktop/TestPipeline/outputSurfRemesh")
parser.add_argument('-meanGroup', action='store', dest='meanGroup', help='Directory with all the mean shapes',
default="/Users/prisgdd/Documents/Projects/CNN/drive-download-20161123T180828Z")
args = parser.parse_args()
meshDir= args.meshDir
outputDir = args.outputDir
meanGroup = args.meanGroup
# Verify directory integrity
if not os.path.isdir(meshDir) or not os.path.isdir(outputDir):
sys.exit("Error: At least one input is not a directory.")
listMesh = os.listdir(meshDir)
if listMesh.count(".DS_Store"):
listMesh.remove(".DS_Store")
for i in range(0,len(listMesh)):
command = list()
command.append(CondylesFeaturesExtractor)
command.append("--input")
command.append(meshDir + "/" + listMesh[i])
outputFile = outputDir + "/" + listMesh[i].split(".")[:-1][0] + "-Features.vtk"
print outputFile
file = open(outputFile, 'w')
file.close()
command.append("--output")
command.append(outputFile)
command.append("--meanGroup")
command.append(meanGroup)
subprocess.call(command)
| 29.185185 | 133 | 0.739848 | [
"Apache-2.0"
] | pdedumast/CondylesClassification | src/runCondylesFeaturesExtractor.py | 1,576 | Python |
import torch
import torch.nn as nn
from .mol_tree import Vocab, MolTree
from .nnutils import create_var
from .jtnn_enc import JTNNEncoder
from .jtnn_dec import JTNNDecoder
from .mpn import MPN, mol2graph
from .jtmpn import JTMPN
from .chemutils import enum_assemble, set_atommap, copy_edit_mol, attach_mols, atom_equal, decode_stereo
import rdkit
import rdkit.Chem as Chem
from rdkit import DataStructs
from rdkit.Chem import AllChem
import copy, math
import sys
def set_batch_nodeID(mol_batch, vocab):
tot = 0
for mol_tree in mol_batch:
for node in mol_tree.nodes:
node.idx = tot
#node.wid = vocab.get_index(node.smiles)
tot += 1
class JTNNVAE(nn.Module):
def __init__(self, vocab, hidden_size, latent_size, depth, stereo=True):
super(JTNNVAE, self).__init__()
self.vocab = vocab
self.hidden_size = hidden_size
self.latent_size = latent_size
self.depth = depth
self.embedding = nn.Embedding(vocab.size(), hidden_size)
self.jtnn = JTNNEncoder(vocab, hidden_size, self.embedding)
self.jtmpn = JTMPN(hidden_size, depth)
self.mpn = MPN(hidden_size, depth)
self.decoder = JTNNDecoder(vocab, hidden_size, latent_size // 2, self.embedding)
self.T_mean = nn.Linear(hidden_size, latent_size // 2)
self.T_var = nn.Linear(hidden_size, latent_size // 2)
self.G_mean = nn.Linear(hidden_size, latent_size // 2)
self.G_var = nn.Linear(hidden_size, latent_size // 2)
self.assm_loss = nn.CrossEntropyLoss(size_average=False)
self.use_stereo = stereo
if stereo:
self.stereo_loss = nn.CrossEntropyLoss(size_average=False)
def encode(self, mol_batch):
set_batch_nodeID(mol_batch, self.vocab)
#root_batch = [mol_tree.nodes[0] for mol_tree in mol_batch]
#tree_mess,tree_vec = self.jtnn(root_batch)
smiles_batch = [mol_tree.smiles for mol_tree in mol_batch]
mol_vec = self.mpn(mol2graph(smiles_batch))
#return tree_mess, tree_vec, mol_vec
return mol_vec
def encode_from_smiles(self, smiles_list):
mol_batch, valid_idx = [], []
for idx, s in enumerate(smiles_list):
try:
mol_batch.append(MolTree(s))
valid_idx.append(idx)
except:
sys.stderr.write('Invalid SMILE string: {}\n'.format(s))
for mol_tree in mol_batch:
mol_tree.recover()
mol_vec = self.encode(mol_batch)
mol_mean = self.G_mean(mol_vec)
return mol_mean
def encode_latent_mean(self, smiles_list):
mol_batch, valid_idx = [], []
for idx, s in enumerate(smiles_list):
try:
mol_batch.append(MolTree(s))
valid_idx.append(idx)
except:
sys.stderr.write('Invalid SMILES string: {}\n'.format(s))
for mol_tree in mol_batch:
mol_tree.recover()
#_, tree_vec, mol_vec = self.encode(mol_batch)
#tree_mean = self.T_mean(tree_vec)
mol_vec = self.encode(mol_batch)
mol_mean = self.G_mean(mol_vec)
#return torch.cat([tree_mean,mol_mean], dim=1)
return mol_mean, valid_idx
def forward(self, mol_batch, beta=0):
batch_size = len(mol_batch)
tree_mess, tree_vec, mol_vec = self.encode(mol_batch)
tree_mean = self.T_mean(tree_vec)
tree_log_var = -torch.abs(self.T_var(tree_vec)) #Following Mueller et al.
mol_mean = self.G_mean(mol_vec)
mol_log_var = -torch.abs(self.G_var(mol_vec)) #Following Mueller et al.
z_mean = torch.cat([tree_mean,mol_mean], dim=1)
z_log_var = torch.cat([tree_log_var,mol_log_var], dim=1)
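# Closed-form KL divergence between the diagonal Gaussian posterior N(mu, sigma^2)
# and the standard normal prior: -0.5 * sum(1 + log sigma^2 - mu^2 - sigma^2),
# averaged over the batch.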
kl_loss = -0.5 * torch.sum(1.0 + z_log_var - z_mean * z_mean - torch.exp(z_log_var)) / batch_size
epsilon = create_var(torch.randn(batch_size, self.latent_size // 2), False)
tree_vec = tree_mean + torch.exp(tree_log_var / 2) * epsilon
epsilon = create_var(torch.randn(batch_size, self.latent_size // 2), False)
mol_vec = mol_mean + torch.exp(mol_log_var / 2) * epsilon
word_loss, topo_loss, word_acc, topo_acc = self.decoder(mol_batch, tree_vec)
assm_loss, assm_acc = self.assm(mol_batch, mol_vec, tree_mess)
if self.use_stereo:
stereo_loss, stereo_acc = self.stereo(mol_batch, mol_vec)
else:
stereo_loss, stereo_acc = 0, 0
all_vec = torch.cat([tree_vec, mol_vec], dim=1)
loss = word_loss + topo_loss + assm_loss + 2 * stereo_loss + beta * kl_loss
return loss, kl_loss.item(), word_acc, topo_acc, assm_acc, stereo_acc
def assm(self, mol_batch, mol_vec, tree_mess):
cands = []
batch_idx = []
for i,mol_tree in enumerate(mol_batch):
for node in mol_tree.nodes:
#Leaf node's attachment is determined by neighboring node's attachment
if node.is_leaf or len(node.cands) == 1: continue
cands.extend( [(cand, mol_tree.nodes, node) for cand in node.cand_mols] )
batch_idx.extend([i] * len(node.cands))
cand_vec = self.jtmpn(cands, tree_mess)
cand_vec = self.G_mean(cand_vec)
batch_idx = create_var(torch.LongTensor(batch_idx))
mol_vec = mol_vec.index_select(0, batch_idx)
mol_vec = mol_vec.view(-1, 1, self.latent_size // 2)
cand_vec = cand_vec.view(-1, self.latent_size // 2, 1)
scores = torch.bmm(mol_vec, cand_vec).squeeze()
cnt,tot,acc = 0,0,0
all_loss = []
for i,mol_tree in enumerate(mol_batch):
comp_nodes = [node for node in mol_tree.nodes if len(node.cands) > 1 and not node.is_leaf]
cnt += len(comp_nodes)
for node in comp_nodes:
label = node.cands.index(node.label)
ncand = len(node.cands)
cur_score = scores.narrow(0, tot, ncand)
tot += ncand
if cur_score[label].item() >= cur_score.max().item():
acc += 1
label = create_var(torch.LongTensor([label]))
all_loss.append( self.assm_loss(cur_score.view(1,-1), label) )
#all_loss = torch.stack(all_loss).sum() / len(mol_batch)
all_loss = sum(all_loss) / len(mol_batch)
return all_loss, acc * 1.0 / cnt
def stereo(self, mol_batch, mol_vec):
stereo_cands,batch_idx = [],[]
labels = []
for i,mol_tree in enumerate(mol_batch):
cands = mol_tree.stereo_cands
if len(cands) == 1: continue
if mol_tree.smiles3D not in cands:
cands.append(mol_tree.smiles3D)
stereo_cands.extend(cands)
batch_idx.extend([i] * len(cands))
labels.append( (cands.index(mol_tree.smiles3D), len(cands)) )
if len(labels) == 0:
return create_var(torch.zeros(1)), 1.0
batch_idx = create_var(torch.LongTensor(batch_idx))
stereo_cands = self.mpn(mol2graph(stereo_cands))
stereo_cands = self.G_mean(stereo_cands)
stereo_labels = mol_vec.index_select(0, batch_idx)
scores = torch.nn.CosineSimilarity()(stereo_cands, stereo_labels)
st,acc = 0,0
all_loss = []
for label,le in labels:
cur_scores = scores.narrow(0, st, le)
if cur_scores[label].item() >= cur_scores.max().item():
acc += 1
label = create_var(torch.LongTensor([label]))
all_loss.append( self.stereo_loss(cur_scores.view(1,-1), label) )
st += le
#all_loss = torch.cat(all_loss).sum() / len(labels)
all_loss = sum(all_loss) / len(labels)
return all_loss, acc * 1.0 / len(labels)
def reconstruct(self, smiles, prob_decode=False):
mol_tree = MolTree(smiles)
mol_tree.recover()
_,tree_vec,mol_vec = self.encode([mol_tree])
tree_mean = self.T_mean(tree_vec)
tree_log_var = -torch.abs(self.T_var(tree_vec)) #Following Mueller et al.
mol_mean = self.G_mean(mol_vec)
mol_log_var = -torch.abs(self.G_var(mol_vec)) #Following Mueller et al.
epsilon = create_var(torch.randn(1, self.latent_size // 2), False)
tree_vec = tree_mean + torch.exp(tree_log_var / 2) * epsilon
epsilon = create_var(torch.randn(1, self.latent_size // 2), False)
mol_vec = mol_mean + torch.exp(mol_log_var / 2) * epsilon
return self.decode(tree_vec, mol_vec, prob_decode)
def recon_eval(self, smiles):
mol_tree = MolTree(smiles)
mol_tree.recover()
_,tree_vec,mol_vec = self.encode([mol_tree])
tree_mean = self.T_mean(tree_vec)
tree_log_var = -torch.abs(self.T_var(tree_vec)) #Following Mueller et al.
mol_mean = self.G_mean(mol_vec)
mol_log_var = -torch.abs(self.G_var(mol_vec)) #Following Mueller et al.
all_smiles = []
for i in range(10):
epsilon = create_var(torch.randn(1, self.latent_size // 2), False)
tree_vec = tree_mean + torch.exp(tree_log_var / 2) * epsilon
epsilon = create_var(torch.randn(1, self.latent_size // 2), False)
mol_vec = mol_mean + torch.exp(mol_log_var / 2) * epsilon
for j in range(10):
new_smiles = self.decode(tree_vec, mol_vec, prob_decode=True)
all_smiles.append(new_smiles)
return all_smiles
def sample_prior(self, prob_decode=False):
tree_vec = create_var(torch.randn(1, self.latent_size // 2), False)
mol_vec = create_var(torch.randn(1, self.latent_size // 2), False)
return self.decode(tree_vec, mol_vec, prob_decode)
def sample_eval(self):
tree_vec = create_var(torch.randn(1, self.latent_size // 2), False)
mol_vec = create_var(torch.randn(1, self.latent_size // 2), False)
all_smiles = []
for i in range(100):
s = self.decode(tree_vec, mol_vec, prob_decode=True)
all_smiles.append(s)
return all_smiles
def decode(self, tree_vec, mol_vec, prob_decode):
pred_root,pred_nodes = self.decoder.decode(tree_vec, prob_decode)
#Mark nid & is_leaf & atommap
for i,node in enumerate(pred_nodes):
node.nid = i + 1
node.is_leaf = (len(node.neighbors) == 1)
if len(node.neighbors) > 1:
set_atommap(node.mol, node.nid)
tree_mess = self.jtnn([pred_root])[0]
cur_mol = copy_edit_mol(pred_root.mol)
global_amap = [{}] + [{} for node in pred_nodes]
global_amap[1] = {atom.GetIdx():atom.GetIdx() for atom in cur_mol.GetAtoms()}
cur_mol = self.dfs_assemble(tree_mess, mol_vec, pred_nodes, cur_mol, global_amap, [], pred_root, None, prob_decode)
if cur_mol is None:
return None
cur_mol = cur_mol.GetMol()
set_atommap(cur_mol)
cur_mol = Chem.MolFromSmiles(Chem.MolToSmiles(cur_mol))
if cur_mol is None: return None
if not self.use_stereo:
return Chem.MolToSmiles(cur_mol)
smiles2D = Chem.MolToSmiles(cur_mol)
stereo_cands = decode_stereo(smiles2D)
if len(stereo_cands) == 1:
return stereo_cands[0]
stereo_vecs = self.mpn(mol2graph(stereo_cands))
stereo_vecs = self.G_mean(stereo_vecs)
scores = nn.CosineSimilarity()(stereo_vecs, mol_vec)
_,max_id = scores.max(dim=0)
return stereo_cands[max_id.data.item()]
def dfs_assemble(self, tree_mess, mol_vec, all_nodes, cur_mol, global_amap, fa_amap, cur_node, fa_node, prob_decode):
fa_nid = fa_node.nid if fa_node is not None else -1
prev_nodes = [fa_node] if fa_node is not None else []
children = [nei for nei in cur_node.neighbors if nei.nid != fa_nid]
neighbors = [nei for nei in children if nei.mol.GetNumAtoms() > 1]
neighbors = sorted(neighbors, key=lambda x:x.mol.GetNumAtoms(), reverse=True)
singletons = [nei for nei in children if nei.mol.GetNumAtoms() == 1]
neighbors = singletons + neighbors
cur_amap = [(fa_nid,a2,a1) for nid,a1,a2 in fa_amap if nid == cur_node.nid]
cands = enum_assemble(cur_node, neighbors, prev_nodes, cur_amap)
if len(cands) == 0:
return None
cand_smiles,cand_mols,cand_amap = zip(*cands)
cands = [(candmol, all_nodes, cur_node) for candmol in cand_mols]
cand_vecs = self.jtmpn(cands, tree_mess)
cand_vecs = self.G_mean(cand_vecs)
mol_vec = mol_vec.squeeze()
scores = torch.mv(cand_vecs, mol_vec) * 20
if prob_decode:
probs = nn.Softmax()(scores.view(1,-1)).squeeze() + 1e-5 #prevent prob = 0
cand_idx = torch.multinomial(probs, probs.numel())
else:
_,cand_idx = torch.sort(scores, descending=True)
backup_mol = Chem.RWMol(cur_mol)
for i in range(cand_idx.numel()):
cur_mol = Chem.RWMol(backup_mol)
pred_amap = cand_amap[cand_idx[i].item()]
new_global_amap = copy.deepcopy(global_amap)
for nei_id,ctr_atom,nei_atom in pred_amap:
if nei_id == fa_nid:
continue
new_global_amap[nei_id][nei_atom] = new_global_amap[cur_node.nid][ctr_atom]
cur_mol = attach_mols(cur_mol, children, [], new_global_amap) #father is already attached
new_mol = cur_mol.GetMol()
new_mol = Chem.MolFromSmiles(Chem.MolToSmiles(new_mol))
if new_mol is None: continue
result = True
for nei_node in children:
if nei_node.is_leaf: continue
cur_mol = self.dfs_assemble(tree_mess, mol_vec, all_nodes, cur_mol, new_global_amap, pred_amap, nei_node, cur_node, prob_decode)
if cur_mol is None:
result = False
break
if result: return cur_mol
return None
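# Usage sketch (comments added for illustration; the vocabulary file, checkpoint and
# hyper-parameters are hypothetical and must match how the model was trained):
#   vocab = Vocab([line.strip() for line in open('vocab.txt')])
#   model = JTNNVAE(vocab, hidden_size=450, latent_size=56, depth=3)
#   model.load_state_dict(torch.load('model.ckpt'))
#   smiles = model.sample_prior()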
| 40.739884 | 144 | 0.62124 | [
"MIT"
] | samgoldman97/icml18-jtnn | jtnn/jtnn_vae.py | 14,096 | Python |
# This code is heavily inspired from https://github.com/fangwei123456/PixelUnshuffle-pytorch
import torch
import torch.nn as nn
import torch.nn.functional as F
def pixel_unshuffle(input, downscale_factor):
'''
input: batchSize * c * k*w * k*h
downscale_factor: k
batchSize * c * k*w * k*h -> batchSize * k*k*c * w * h
'''
c = input.shape[1]
kernel = torch.zeros(size = [downscale_factor * downscale_factor * c, 1, downscale_factor, downscale_factor],
device = input.device)
for y in range(downscale_factor):
for x in range(downscale_factor):
kernel[x + y * downscale_factor::downscale_factor * downscale_factor, 0, y, x] = 1
return F.conv2d(input, kernel, stride = downscale_factor, groups = c)
class PixelUnShuffle(nn.Module):
def __init__(self, downscale_factor):
super(PixelUnShuffle, self).__init__()
self.downscale_factor = downscale_factor
def forward(self, input):
'''
input: batchSize * c * k*w * k*h
downscale_factor: k
batchSize * c * k*w * k*h -> batchSize * k*k*c * w * h
'''
return pixel_unshuffle(input, self.downscale_factor)
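# Minimal self-check (illustrative addition, not part of the original module):
# verifies the documented batchSize*c*(k*w)*(k*h) -> batchSize*(k*k*c)*w*h shape
# change and that F.pixel_shuffle inverts the operation for the same factor.
if __name__ == '__main__':
    x = torch.randn(2, 3, 8, 8)
    y = PixelUnShuffle(2)(x)
    assert y.shape == (2, 12, 4, 4)
    # round trip: shuffling back recovers the original tensor
    assert torch.allclose(F.pixel_shuffle(y, 2), x)
    print('pixel_unshuffle OK:', tuple(y.shape))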
| 36.272727 | 113 | 0.643275 | [
"MIT"
] | laowng/GISR | src/model/PixelUnShuffle.py | 1,197 | Python |
import itertools
import logging
import os.path as osp
import tempfile
import mmcv
import numpy as np
from mmcv.utils import print_log
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from terminaltables import AsciiTable
from mmdet.core import eval_recalls
from .builder import DATASETS
from .custom import CustomDataset
try:
import pycocotools
assert pycocotools.__version__ >= '12.0.2'
except AssertionError:
raise AssertionError('Incompatible version of pycocotools is installed. '
'Run pip uninstall pycocotools first. Then run pip '
'install mmpycocotools to install open-mmlab forked '
'pycocotools.')
@DATASETS.register_module()
class CocoDatasetCar(CustomDataset):
CLASSES = ('small ship', 'small car', 'bus', 'truck', 'train')
def load_annotations(self, ann_file):
"""Load annotation from COCO style annotation file.
Args:
ann_file (str): Path of annotation file.
Returns:
list[dict]: Annotation info from COCO api.
"""
self.coco = COCO(ann_file)
self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
self.img_ids = self.coco.get_img_ids()
data_infos = []
for i in self.img_ids:
info = self.coco.load_imgs([i])[0]
info['filename'] = info['file_name']
data_infos.append(info)
return data_infos
def get_ann_info(self, idx):
"""Get COCO annotation by index.
Args:
idx (int): Index of data.
Returns:
dict: Annotation info of specified index.
"""
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return self._parse_ann_info(self.data_infos[idx], ann_info)
def get_cat_ids(self, idx):
"""Get COCO category ids by index.
Args:
idx (int): Index of data.
Returns:
list[int]: All categories in the image of specified index.
"""
img_id = self.data_infos[idx]['id']
ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
ann_info = self.coco.load_anns(ann_ids)
return [ann['category_id'] for ann in ann_info]
def _filter_imgs(self, min_size=32):
"""Filter images too small or without ground truths."""
valid_inds = []
# obtain images that contain annotation
ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
# obtain images that contain annotations of the required categories
ids_in_cat = set()
for i, class_id in enumerate(self.cat_ids):
ids_in_cat |= set(self.coco.cat_img_map[class_id])
# merge the image id sets of the two conditions and use the merged set
# to filter out images if self.filter_empty_gt=True
ids_in_cat &= ids_with_ann
valid_img_ids = []
for i, img_info in enumerate(self.data_infos):
img_id = self.img_ids[i]
if self.filter_empty_gt and img_id not in ids_in_cat:
continue
if min(img_info['width'], img_info['height']) >= min_size:
valid_inds.append(i)
valid_img_ids.append(img_id)
self.img_ids = valid_img_ids
return valid_inds
def _parse_ann_info(self, img_info, ann_info):
"""Parse bbox and mask annotation.
Args:
img_info (dict): Image info of the image the annotations belong to.
ann_info (list[dict]): Annotation info of the image.
Returns:
dict: A dict containing the following keys: bboxes, bboxes_ignore,\
labels, masks, seg_map. "masks" are raw annotations and not \
decoded into binary masks.
"""
gt_bboxes = []
gt_labels = []
gt_bboxes_ignore = []
gt_masks_ann = []
for i, ann in enumerate(ann_info):
if ann.get('ignore', False):
continue
x1, y1, w, h = ann['bbox']
inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
if inter_w * inter_h == 0:
continue
if ann['area'] <= 0 or w < 1 or h < 1:
continue
if ann['category_id'] not in self.cat_ids:
continue
bbox = [x1, y1, x1 + w, y1 + h]
if ann.get('iscrowd', False):
gt_bboxes_ignore.append(bbox)
else:
gt_bboxes.append(bbox)
gt_labels.append(self.cat2label[ann['category_id']])
gt_masks_ann.append(ann.get('segmentation', None))
if gt_bboxes:
gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
gt_labels = np.array(gt_labels, dtype=np.int64)
else:
gt_bboxes = np.zeros((0, 4), dtype=np.float32)
gt_labels = np.array([], dtype=np.int64)
if gt_bboxes_ignore:
gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
else:
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
seg_map = img_info['filename'].replace('jpg', 'png')
ann = dict(
bboxes=gt_bboxes,
labels=gt_labels,
bboxes_ignore=gt_bboxes_ignore,
masks=gt_masks_ann,
seg_map=seg_map)
return ann
def xyxy2xywh(self, bbox):
"""Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO
evaluation.
Args:
bbox (numpy.ndarray): The bounding boxes, shape (4, ), in
``xyxy`` order.
Returns:
list[float]: The converted bounding boxes, in ``xywh`` order.
"""
_bbox = bbox.tolist()
return [
_bbox[0],
_bbox[1],
_bbox[2] - _bbox[0],
_bbox[3] - _bbox[1],
]
def _proposal2json(self, results):
"""Convert proposal results to COCO json style."""
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
bboxes = results[idx]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = 1
json_results.append(data)
return json_results
def _det2json(self, results):
"""Convert detection results to COCO json style."""
json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
result = results[idx]
for label in range(len(result)):
bboxes = result[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = self.cat_ids[label]
json_results.append(data)
return json_results
def _segm2json(self, results):
"""Convert instance segmentation results to COCO json style."""
bbox_json_results = []
segm_json_results = []
for idx in range(len(self)):
img_id = self.img_ids[idx]
det, seg = results[idx]
for label in range(len(det)):
# bbox results
bboxes = det[label]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(bboxes[i][4])
data['category_id'] = self.cat_ids[label]
bbox_json_results.append(data)
# segm results
# some detectors use different scores for bbox and mask
if isinstance(seg, tuple):
segms = seg[0][label]
mask_score = seg[1][label]
else:
segms = seg[label]
mask_score = [bbox[4] for bbox in bboxes]
for i in range(bboxes.shape[0]):
data = dict()
data['image_id'] = img_id
data['bbox'] = self.xyxy2xywh(bboxes[i])
data['score'] = float(mask_score[i])
data['category_id'] = self.cat_ids[label]
if isinstance(segms[i]['counts'], bytes):
segms[i]['counts'] = segms[i]['counts'].decode()
data['segmentation'] = segms[i]
segm_json_results.append(data)
return bbox_json_results, segm_json_results
def results2json(self, results, outfile_prefix):
"""Dump the detection results to a COCO style json file.
There are 3 types of results: proposals, bbox predictions, mask
predictions, and they have different data types. This method will
automatically recognize the type, and dump them to json files.
Args:
results (list[list | tuple | ndarray]): Testing results of the
dataset.
outfile_prefix (str): The filename prefix of the json files. If the
prefix is "somepath/xxx", the json files will be named
"somepath/xxx.bbox.json", "somepath/xxx.segm.json",
"somepath/xxx.proposal.json".
Returns:
dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \
values are corresponding filenames.
"""
result_files = dict()
if isinstance(results[0], list):
json_results = self._det2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
mmcv.dump(json_results, result_files['bbox'])
elif isinstance(results[0], tuple):
json_results = self._segm2json(results)
result_files['bbox'] = f'{outfile_prefix}.bbox.json'
result_files['proposal'] = f'{outfile_prefix}.bbox.json'
result_files['segm'] = f'{outfile_prefix}.segm.json'
mmcv.dump(json_results[0], result_files['bbox'])
mmcv.dump(json_results[1], result_files['segm'])
elif isinstance(results[0], np.ndarray):
json_results = self._proposal2json(results)
result_files['proposal'] = f'{outfile_prefix}.proposal.json'
mmcv.dump(json_results, result_files['proposal'])
else:
raise TypeError('invalid type of results')
return result_files
def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):
gt_bboxes = []
for i in range(len(self.img_ids)):
ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i])
ann_info = self.coco.load_anns(ann_ids)
if len(ann_info) == 0:
gt_bboxes.append(np.zeros((0, 4)))
continue
bboxes = []
for ann in ann_info:
if ann.get('ignore', False) or ann['iscrowd']:
continue
x1, y1, w, h = ann['bbox']
bboxes.append([x1, y1, x1 + w, y1 + h])
bboxes = np.array(bboxes, dtype=np.float32)
if bboxes.shape[0] == 0:
bboxes = np.zeros((0, 4))
gt_bboxes.append(bboxes)
recalls = eval_recalls(
gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)
ar = recalls.mean(axis=1)
return ar
def format_results(self, results, jsonfile_prefix=None, **kwargs):
"""Format the results to json (standard format for COCO evaluation).
Args:
results (list[tuple | numpy.ndarray]): Testing results of the
dataset.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
Returns:
tuple: (result_files, tmp_dir), result_files is a dict containing \
the json filepaths, tmp_dir is the temporal directory created \
for saving json files when jsonfile_prefix is not specified.
"""
assert isinstance(results, list), 'results must be a list'
assert len(results) == len(self), (
'The length of results is not equal to the dataset len: {} != {}'.
format(len(results), len(self)))
if jsonfile_prefix is None:
tmp_dir = tempfile.TemporaryDirectory()
jsonfile_prefix = osp.join(tmp_dir.name, 'results')
else:
tmp_dir = None
result_files = self.results2json(results, jsonfile_prefix)
return result_files, tmp_dir
def evaluate(self,
results,
metric='bbox',
logger=None,
jsonfile_prefix=None,
classwise=False,
proposal_nums=(100, 300, 1000),
iou_thrs=None,
metric_items=None):
"""Evaluation in COCO protocol.
Args:
results (list[list | tuple]): Testing results of the dataset.
metric (str | list[str]): Metrics to be evaluated. Options are
'bbox', 'segm', 'proposal', 'proposal_fast'.
logger (logging.Logger | str | None): Logger used for printing
related information during evaluation. Default: None.
jsonfile_prefix (str | None): The prefix of json files. It includes
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
classwise (bool): Whether to evaluate the AP for each class.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Default: (100, 300, 1000).
iou_thrs (Sequence[float], optional): IoU threshold used for
evaluating recalls/mAPs. If set to a list, the average of all
IoUs will also be computed. If not specified, [0.50, 0.55,
0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.
Default: None.
metric_items (list[str] | str, optional): Metric items that will
be returned. If not specified, ``['AR@100', 'AR@300',
'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be
used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75',
'mAP_s', 'mAP_m', 'mAP_l']`` will be used when
``metric=='bbox' or metric=='segm'``.
Returns:
dict[str, float]: COCO style evaluation metric.
"""
metrics = metric if isinstance(metric, list) else [metric]
allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
if iou_thrs is None:
iou_thrs = np.linspace(
.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
if metric_items is not None:
if not isinstance(metric_items, list):
metric_items = [metric_items]
result_files, tmp_dir = self.format_results(results, jsonfile_prefix)
eval_results = {}
cocoGt = self.coco
for metric in metrics:
msg = f'Evaluating {metric}...'
if logger is None:
msg = '\n' + msg
print_log(msg, logger=logger)
if metric == 'proposal_fast':
ar = self.fast_eval_recall(
results, proposal_nums, iou_thrs, logger='silent')
log_msg = []
for i, num in enumerate(proposal_nums):
eval_results[f'AR@{num}'] = ar[i]
log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
log_msg = ''.join(log_msg)
print_log(log_msg, logger=logger)
continue
if metric not in result_files:
raise KeyError(f'{metric} is not in results')
try:
cocoDt = cocoGt.loadRes(result_files[metric])
except IndexError:
print_log(
'The testing results of the whole dataset is empty.',
logger=logger,
level=logging.ERROR)
break
iou_type = 'bbox' if metric == 'proposal' else metric
cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
cocoEval.params.catIds = self.cat_ids
cocoEval.params.imgIds = self.img_ids
cocoEval.params.maxDets = list(proposal_nums)
cocoEval.params.iouThrs = iou_thrs
# mapping of cocoEval.stats
coco_metric_names = {
'mAP': 0,
'mAP_50': 1,
'mAP_75': 2,
'mAP_s': 3,
'mAP_m': 4,
'mAP_l': 5,
'AR@100': 6,
'AR@300': 7,
'AR@1000': 8,
'AR_s@1000': 9,
'AR_m@1000': 10,
'AR_l@1000': 11
}
if metric_items is not None:
for metric_item in metric_items:
if metric_item not in coco_metric_names:
raise KeyError(
f'metric item {metric_item} is not supported')
if metric == 'proposal':
cocoEval.params.useCats = 0
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if metric_items is None:
metric_items = [
'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',
'AR_m@1000', 'AR_l@1000'
]
for item in metric_items:
val = float(
f'{cocoEval.stats[coco_metric_names[item]]:.3f}')
eval_results[item] = val
else:
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if classwise: # Compute per-category AP
# Compute per-category AP
# from https://github.com/facebookresearch/detectron2/
precisions = cocoEval.eval['precision']
# precision: (iou, recall, cls, area range, max dets)
assert len(self.cat_ids) == precisions.shape[2]
results_per_category = []
for idx, catId in enumerate(self.cat_ids):
# area range index 0: all area ranges
# max dets index -1: typically 100 per image
nm = self.coco.loadCats(catId)[0]
precision = precisions[:, :, idx, 0, -1]
precision = precision[precision > -1]
if precision.size:
ap = np.mean(precision)
else:
ap = float('nan')
results_per_category.append(
(f'{nm["name"]}', f'{float(ap):0.3f}'))
num_columns = min(6, len(results_per_category) * 2)
results_flatten = list(
itertools.chain(*results_per_category))
headers = ['category', 'AP'] * (num_columns // 2)
results_2d = itertools.zip_longest(*[
results_flatten[i::num_columns]
for i in range(num_columns)
])
table_data = [headers]
table_data += [result for result in results_2d]
table = AsciiTable(table_data)
print_log('\n' + table.table, logger=logger)
if metric_items is None:
metric_items = [
'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
]
for metric_item in metric_items:
key = f'{metric}_{metric_item}'
val = float(
f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}'
)
eval_results[key] = val
ap = cocoEval.stats[:6]
eval_results[f'{metric}_mAP_copypaste'] = (
f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
f'{ap[4]:.3f} {ap[5]:.3f}')
if tmp_dir is not None:
tmp_dir.cleanup()
return eval_results
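# Usage sketch (comments added for illustration; once this module is imported the
# class is available through the DATASETS registry, and the annotation/image paths
# below are hypothetical placeholders):
#   data = dict(
#       train=dict(
#           type='CocoDatasetCar',
#           ann_file='data/annotations/train.json',
#           img_prefix='data/images/',
#           pipeline=train_pipeline))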
| 41.793372 | 79 | 0.528871 | [
"Apache-2.0"
] | invite-you/mmdetection | mmdet/datasets/coco_car.py | 21,440 | Python |
from IMDB_task4 import scrape_movie_details
from pprint import pprint
import os,requests,json,time,random
from IMDB_task1 import scrape_top_list
# task13
# this task creates one JSON file per movie in our directory
def save_data():
movies_data = scrape_top_list()
for one_movie in movies_data :
id_movie = (one_movie['urls'][-10:-1])
exists = os.path.exists("screpingdata/" + str(id_movie) + ".json")
cwd = os.getcwd()
if exists:
with open(cwd+"/screpingdata/" + str(id_movie) + ".json","r+") as file :
data = file.read()
load_data = json.loads(data)
return (load_data)
else:
for one_movie in movies_data :
id_movie = (one_movie['urls'][-10:-1])
# task_no. 9
sleep_time = random.randint(1,3)
time.sleep(sleep_time)
url = (one_movie["urls"])
screpe_movie_data = scrape_movie_details(url)
with open("screpingdata/" + str(id_movie) + ".json","w") as file :
data = json.dumps(screpe_movie_data,indent=4, sort_keys = True)
write_data = file.write(data)
return (write_data)
pprint (save_data())
| 34.064516 | 75 | 0.688447 | [
"MIT"
] | amansharmma/IMDB-movie-scraper | IMDB_task8.py | 1,056 | Python |
#
# Copyright (c) 2015-2016 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import six
from nfv_common.helpers import Constant
from nfv_common.helpers import Constants
from nfv_common.helpers import Singleton
@six.add_metaclass(Singleton)
class NfviErrorCodes(Constants):
"""
NFVI - Error Code Constants
"""
TOKEN_EXPIRED = Constant('token-expired')
NOT_FOUND = Constant('not-found')
# Constant Instantiation
NFVI_ERROR_CODE = NfviErrorCodes()
| 20.333333 | 50 | 0.747951 | [
"Apache-2.0"
] | SidneyAn/nfv | nfv/nfv-vim/nfv_vim/nfvi/_nfvi_defs.py | 488 | Python |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-06-17 14:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('book', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='bookdetail',
name='language',
field=models.CharField(blank=True, choices=[('CN', '中文'), ('EN', 'English')], help_text='现暂时提供两种语言类别', max_length=5, null=True, verbose_name='文章详情语言类别'),
),
migrations.AddField(
model_name='booknotedetail',
name='language',
field=models.CharField(blank=True, choices=[('CN', '中文'), ('EN', 'English')], help_text='现暂时提供两种语言类别', max_length=5, null=True, verbose_name='文章详情语言类别'),
),
migrations.AddField(
model_name='booknoteinfo',
name='chapter',
field=models.CharField(default='', help_text='所属章节', max_length=20, verbose_name='章节'),
),
]
| 33.516129 | 165 | 0.599615 | [
"Apache-2.0"
] | LennonChin/Blog-Back-Project | apps/book/migrations/0002_auto_20180617_2210.py | 1,135 | Python |
from django.apps import AppConfig
class LibookapiConfig(AppConfig):
name = 'libookapi'
| 15.5 | 33 | 0.763441 | [
"Apache-2.0"
] | sjtu-libook/libook | libookapi/apps.py | 93 | Python |
from django.db import models
from django.utils.translation import ugettext_lazy as _
from filer.models.imagemodels import Image
from fractions import Fraction
import exifread
class ExifData(models.Model):
class Meta:
verbose_name = _('EXIF Data')
verbose_name_plural = _('EXIF data')
image = models.OneToOneField(
Image,
verbose_name=_('Image'),
)
focal_length = models.CharField(
max_length=100,
verbose_name=_('Focal length'),
blank=True,
)
iso = models.CharField(
max_length=100,
verbose_name=_('ISO'),
blank=True,
)
fraction = models.CharField(
max_length=100,
verbose_name=_('Fraction'),
blank=True,
)
exposure_time = models.CharField(
max_length=100,
verbose_name=_('Exposure time'),
blank=True,
)
def save(self, *args, **kwargs):
if self.pk is None:
self._read_exif(self.image.file)
super(ExifData, self).save(*args, **kwargs)
@property
def as_list(self):
tmp = (self.focal_length, self.fraction, self.exposure_time, self.iso)
return [value for value in tmp if value]
def _read_exif(self, file):
result = {}
try:
file.open('rb')
# read tags
tags = exifread.process_file(file)
# get necessary tags
self.focal_length = self._get_and_format(tags,
"EXIF FocalLength",
"%gmm", lambda s: Fraction(s))
self.iso = self._get_and_format(tags,
"EXIF ISOSpeedRatings", "ISO %d",
lambda s: int(s))
self.fraction = self._get_and_format(tags, "EXIF FNumber", "f/%g",
lambda s: float(Fraction(s)))
# format exposure time (fraction or float)
exposure_time = self._get_and_format(tags, "EXIF ExposureTime",
None, lambda s: Fraction(s))
exposure_time_str = ""
if exposure_time:
if exposure_time >= 1:
exposure_time_str = "%gs" % exposure_time
else:
exposure_time_str = "%ss" % str(exposure_time)
self.exposure_time = exposure_time_str
except IOError as e:
pass
def _get_and_format(self, tags, key, format, convertfunc):
"""
Gets element with "key" from dict "tags". Converts this data with
convertfunc and inserts it into the formatstring "format".
If "format" is None, the data is returned without formatting, conversion
is done.
It the key is not in the dict, the empty string is returned.
"""
data = tags.get(key, None)
if data:
data = convertfunc(str(data))
if format:
return format % data
return data
return ""
| 30.349515 | 83 | 0.53039 | [
"MIT"
] | svenhertle/django_image_exif | image_exif/models.py | 3,126 | Python |
from decimal import Decimal
import logging
from django.core.exceptions import ImproperlyConfigured
from suds import WebFault
from suds.transport import TransportError
import vatnumber
import stdnum
from plans.taxation import TaxationPolicy
logger = logging.getLogger('plans.taxation.eu.vies')
class EUTaxationPolicy(TaxationPolicy):
"""
This taxation policy should be correct for all EU countries. It uses following rules:
* if issuer country is not in EU - assert error,
* for buyer of the same country as issuer - return issuer tax,
* for company buyer from EU (with VIES) returns VAT n/a reverse charge,
* for non-company buyer from EU returns VAT from buyer country,
* for non-EU buyer return VAT n/a.
This taxation policy was updated on 1 Jan 2015 after the new EU VAT regulations. You should also probably
register in the MOSS system.
"""
# Standard VAT rates according to
# http://ec.europa.eu/taxation_customs/resources/documents/taxation/vat/how_vat_works/rates/vat_rates_en.pdf
# Situation at 1 Jan 2017
EU_COUNTRIES_VAT = {
'BE': Decimal('21'), # Belgium
'BG': Decimal('20'), # Bulgaria
'CZ': Decimal('21'), # Czech Republic
'DK': Decimal('25'), # Denmark
'DE': Decimal('19'), # Germany
'EE': Decimal('20'), # Estonia
'EL': Decimal('24'), # Greece
'ES': Decimal('21'), # Spain
'FR': Decimal('20'), # France
'HR': Decimal('25'), # Croatia
'IE': Decimal('23'), # Ireland
'IT': Decimal('22'), # Italy
'CY': Decimal('19'), # Cyprus
'LV': Decimal('21'), # Latvia
'LT': Decimal('21'), # Lithuania
'LU': Decimal('17'), # Luxembourg
'HU': Decimal('27'), # Hungary
'MT': Decimal('18'), # Malta
'NL': Decimal('21'), # Netherlands
'AT': Decimal('20'), # Austria
'PL': Decimal('23'), # Poland
'PT': Decimal('23'), # Portugal
'RO': Decimal('19'), # Romania
'SI': Decimal('22'), # Slovenia
'SK': Decimal('20'), # Slovakia
'FI': Decimal('24'), # Finland
'SE': Decimal('25'), # Sweden
'GB': Decimal('20'), # United Kingdom (Great Britain)
}
@classmethod
def is_in_EU(cls, country_code):
return country_code.upper() in cls.EU_COUNTRIES_VAT
@classmethod
def get_default_tax(cls):
issuer_country_code = cls.get_issuer_country_code()
try:
return cls.EU_COUNTRIES_VAT[issuer_country_code]
except KeyError:
raise ImproperlyConfigured("EUTaxationPolicy requires that issuer country is in EU")
@classmethod
def get_tax_rate(cls, tax_id, country_code):
issuer_country_code = cls.get_issuer_country_code()
if not cls.is_in_EU(issuer_country_code):
raise ImproperlyConfigured("EUTaxationPolicy requires that issuer country is in EU")
if not tax_id and not country_code:
# No vat id, no country
return cls.get_default_tax()
elif not tax_id and country_code:
# Customer is not a company, we know his country
if cls.is_in_EU(country_code):
# Customer (private person) is from a EU
# Customer pays his VAT rate
return cls.EU_COUNTRIES_VAT[country_code]
else:
# Customer (private person) not from EU
# VAT n/a
return None
else:
# Customer is a company; we know the country and VAT ID
if country_code.upper() == issuer_country_code.upper():
# Company is from the same country as issuer
# Normal tax
return cls.get_default_tax()
if cls.is_in_EU(country_code):
# Company is from other EU country
try:
vies_result = vatnumber.check_vies(tax_id)
logger.info("TAX_ID=%s RESULT=%s" % (tax_id, vies_result))
if tax_id and vies_result:
# Company is registered in VIES
# Charge back
return None
else:
return cls.EU_COUNTRIES_VAT[country_code]
except (WebFault, TransportError, stdnum.exceptions.InvalidComponent):
# If we could not connect to VIES or the VAT ID is incorrect
logger.exception("TAX_ID=%s" % (tax_id))
return cls.EU_COUNTRIES_VAT[country_code]
else:
# Company is not from EU
# VAT n/a
return None
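# Illustrative outcomes (descriptive comments added; they assume the issuer country
# resolves to 'PL', and the VAT ID shown is a hypothetical placeholder registered in VIES):
#   EUTaxationPolicy.get_tax_rate(None, None)          -> Decimal('23')  # no data: issuer's default VAT
#   EUTaxationPolicy.get_tax_rate(None, 'DE')          -> Decimal('19')  # EU private person: buyer-country VAT
#   EUTaxationPolicy.get_tax_rate(None, 'US')          -> None           # non-EU buyer: VAT n/a
#   EUTaxationPolicy.get_tax_rate('DE123456789', 'DE') -> None           # EU company in VIES: reverse charge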
| 39.066116 | 112 | 0.579437 | [
"MIT"
] | ShreeshaRelysys/django-plans | plans/taxation/eu.py | 4,727 | Python |
#! /usr/bin/python3
# -*- coding: utf-8 -*-
##############################################################################
# Copyright 2020 AlexPDev
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Module for testing procedures on Check Tab."""
import os
from pathlib import Path
import pytest
from tests import dir1, dir2, rmpath, tempfile, ttorrent, wind
from torrentfileQt.checkTab import ProgressBar, TreePieceItem, TreeWidget
def test_fixture():
"""Test Fixtures."""
assert dir1 and dir2 and ttorrent and wind
def test_missing_files_check(dir2, ttorrent, wind):
"""Test missing files checker proceduire."""
window, _ = wind
checktab = window.central.checkWidget
window.central.setCurrentWidget(checktab)
dirpath = Path(dir2)
for item in dirpath.iterdir():
if item.is_file():
os.remove(item)
checktab.fileInput.setText(ttorrent)
checktab.searchInput.setText(dir2)
checktab.checkButton.click()
assert checktab.treeWidget.topLevelItemCount() > 0
def test_shorter_files_check(wind, ttorrent, dir2):
"""Test missing files checker proceduire."""
window, _ = wind
checktab = window.central.checkWidget
dirpath = Path(dir2)
window.central.setCurrentWidget(checktab)
def shortenfile(item):
"""Shave some data off the end of file."""
temp = bytearray(2 ** 19)
with open(item, "rb") as fd:
fd.readinto(temp)
with open(item, "wb") as fd:
fd.write(temp)
if os.path.exists(dirpath):
for item in dirpath.iterdir():
if item.is_file():
shortenfile(item)
checktab.fileInput.setText(ttorrent)
checktab.searchInput.setText(dir2)
checktab.checkButton.click()
assert checktab.treeWidget.topLevelItemCount() > 0
def test_check_tab(wind, ttorrent, dir1):
"""Test checker procedure."""
window, _ = wind
checktab = window.central.checkWidget
window.central.setCurrentWidget(checktab)
checktab.fileInput.setText(ttorrent)
checktab.searchInput.setText(dir1)
checktab.checkButton.click()
assert checktab.textEdit.toPlainText() != ""
def test_check_tab_input1(wind, dir1):
"""Test checker procedure."""
window, _ = wind
checktab = window.central.checkWidget
window.central.setCurrentWidget(checktab)
checktab.browseButton2.browse(dir1)
assert checktab.searchInput.text() != ""
def test_check_tab_input_2(wind, dir1):
"""Test checker procedure."""
window, _ = wind
checktab = window.central.checkWidget
window.central.setCurrentWidget(checktab)
checktab.browseButton1.browse(dir1)
assert checktab.fileInput.text() != ""
def test_check_tab4(wind):
"""Test checker procedure again."""
window, _ = wind
checktab = window.central.checkWidget
window.central.setCurrentWidget(checktab)
tree_widget = checktab.treeWidget
assert tree_widget.invisibleRootItem() is not None
def test_clear_logtext(wind):
"""Test checker logTextEdit widget function."""
window, _ = wind
checktab = window.central.checkWidget
window.central.setCurrentWidget(checktab)
text_edit = checktab.textEdit
text_edit.insertPlainText("sometext")
text_edit.clear_data()
assert text_edit.toPlainText() == ""
def test_checktab_tree(wind):
"""Check tree item counting functionality."""
window, _ = wind
checktab = window.central.checkWidget
window.central.setCurrentWidget(checktab)
tree = TreeWidget(parent=checktab)
item = TreePieceItem(type=0, tree=tree)
item.progbar = ProgressBar(parent=tree, size=1000000)
item.count(100000000)
assert item.counted == 1000000
@pytest.mark.parametrize("size", list(range(18, 20)))
@pytest.mark.parametrize("index", list(range(1, 7, 2)))
@pytest.mark.parametrize("version", [1, 2, 3])
@pytest.mark.parametrize("ext", [".mkv", ".rar", ".r00", ".mp3"])
def test_singlefile(size, ext, index, version, wind):
"""Test the singlefile for create and check tabs."""
window, _ = wind
createtab = window.central.createWidget
checktab = window.central.checkWidget
window.central.setCurrentWidget(checktab)
testfile = str(tempfile(exp=size))
tfile = testfile + ext
os.rename(testfile, tfile)
metafile = tfile + ".torrent"
createtab.path_input.clear()
createtab.output_input.clear()
createtab.browse_file_button.browse(tfile)
createtab.output_input.setText(metafile)
createtab.piece_length.setCurrentIndex(index)
btns = [createtab.v1button, createtab.v2button, createtab.hybridbutton]
for i, btn in enumerate(btns):
if i + 1 == version:
btn.click()
break
createtab.submit_button.click()
createtab.submit_button.join()
checktab.fileInput.clear()
checktab.searchInput.clear()
checktab.fileInput.setText(metafile)
checktab.searchInput.setText(tfile)
checktab.checkButton.click()
ptext = checktab.textEdit.toPlainText()
assert "100%" in ptext
rmpath(tfile, metafile)
| 33.152941 | 78 | 0.683996 | [
"Apache-2.0"
] | alexpdev/Torrentfile-GUI | tests/test_checktab.py | 5,636 | Python |
"""Internal Exception classes used by package
These classes subclass the base Exception class
Classes
_______
MissingPortException(Exception)
SerialReadException(Exception)
UnknownConfirmationCodeException(Exception)
"""
class MissingPortException(Exception):
"""
Exception raised when the port param is missing when instantiating
the AdafruitFingerprint class
"""
class SerialReadException(Exception):
"""Exception raised when no data is read from the serial port"""
class UnknownConfirmationCodeException(Exception):
"""Exception raised when package content is an invalid response"""
| 22.888889 | 70 | 0.786408 | [
"MIT"
] | cerebrohivetech/adafruit-fingerprint | adafruit_fingerprint/exceptions.py | 618 | Python |
"""Python Compatibility Utilities."""
from __future__ import annotations
import numbers
import sys
from contextlib import contextmanager
from functools import wraps
try:
from importlib import metadata as importlib_metadata
except ImportError:
# TODO: Remove this when we drop support for Python 3.7
import importlib_metadata
from io import UnsupportedOperation
from kombu.exceptions import reraise
FILENO_ERRORS = (AttributeError, ValueError, UnsupportedOperation)
try:
from billiard.util import register_after_fork
except ImportError: # pragma: no cover
try:
from multiprocessing.util import register_after_fork
except ImportError:
register_after_fork = None
_environment = None
def coro(gen):
"""Decorator to mark generator as co-routine."""
@wraps(gen)
def wind_up(*args, **kwargs):
it = gen(*args, **kwargs)
next(it)
return it
return wind_up
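# Illustrative usage (comment only, not part of kombu): the wrapper advances the
# generator to its first ``yield`` so callers can ``send()`` to it immediately.
#   @coro
#   def running_total():
#       total = 0
#       while True:
#           total += (yield total)
#   acc = running_total()   # already primed by the decorator
#   acc.send(10)            # -> 10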
def _detect_environment():
# ## -eventlet-
if 'eventlet' in sys.modules:
try:
import socket
from eventlet.patcher import is_monkey_patched as is_eventlet
if is_eventlet(socket):
return 'eventlet'
except ImportError:
pass
# ## -gevent-
if 'gevent' in sys.modules:
try:
import socket
from gevent import socket as _gsocket
if socket.socket is _gsocket.socket:
return 'gevent'
except ImportError:
pass
return 'default'
def detect_environment():
"""Detect the current environment: default, eventlet, or gevent."""
global _environment
if _environment is None:
_environment = _detect_environment()
return _environment
def entrypoints(namespace):
"""Return setuptools entrypoints for namespace."""
if sys.version_info >= (3,10):
entry_points = importlib_metadata.entry_points(group=namespace)
else:
entry_points = importlib_metadata.entry_points().get(namespace, [])
return (
(ep, ep.load())
for ep in entry_points
)
def fileno(f):
"""Get fileno from file-like object."""
if isinstance(f, numbers.Integral):
return f
return f.fileno()
def maybe_fileno(f):
"""Get object fileno, or :const:`None` if not defined."""
try:
return fileno(f)
except FILENO_ERRORS:
pass
@contextmanager
def nested(*managers): # pragma: no cover
"""Nest context managers."""
# flake8: noqa
exits = []
vars = []
exc = (None, None, None)
try:
try:
for mgr in managers:
exit = mgr.__exit__
enter = mgr.__enter__
vars.append(enter())
exits.append(exit)
yield vars
except:
exc = sys.exc_info()
finally:
while exits:
exit = exits.pop()
try:
if exit(*exc):
exc = (None, None, None)
except:
exc = sys.exc_info()
if exc != (None, None, None):
# Don't rely on sys.exc_info() still containing
# the right information. Another exception may
# have been raised and caught by an exit method
reraise(exc[0], exc[1], exc[2])
finally:
del(exc)
| 24.57554 | 75 | 0.592799 | [
"BSD-3-Clause"
] | CountRedClaw/kombu | kombu/utils/compat.py | 3,416 | Python |
# -*- coding: utf-8 -*- #
"""*********************************************************************************************"""
# FileName [ model.py ]
# Synopsis [ the 1-hidden model ]
# Author [ S3PRL ]
# Copyright [ Copyleft(c), Speech Lab, NTU, Taiwan ]
"""*********************************************************************************************"""
import torch
import torch.nn as nn
import torch.nn.functional as F
class ConvBank(nn.Module):
def __init__(self, input_dim, output_class_num, kernels, cnn_size, hidden_size, dropout, **kwargs):
super(ConvBank, self).__init__()
self.drop_p = dropout
self.in_linear = nn.Linear(input_dim, hidden_size)
latest_size = hidden_size
# conv bank
self.cnns = nn.ModuleList()
assert len(kernels) > 0
for kernel in kernels:
self.cnns.append(nn.Conv1d(latest_size, cnn_size, kernel, padding=kernel//2))
latest_size = cnn_size * len(kernels)
self.out_linear = nn.Linear(latest_size, output_class_num)
def forward(self, features):
hidden = F.dropout(F.relu(self.in_linear(features)), p=self.drop_p)
conv_feats = []
hidden = hidden.transpose(1, 2).contiguous()
for cnn in self.cnns:
conv_feats.append(cnn(hidden))
hidden = torch.cat(conv_feats, dim=1).transpose(1, 2).contiguous()
hidden = F.dropout(F.relu(hidden), p=self.drop_p)
predicted = self.out_linear(hidden)
return predicted
class Framelevel1Hidden(nn.Module):
def __init__(self, input_dim, output_class_num, hidden_size, dropout, **kwargs):
super(Framelevel1Hidden, self).__init__()
# init attributes
self.in_linear = nn.Linear(input_dim, hidden_size)
self.out_linear = nn.Linear(hidden_size, output_class_num)
self.drop = nn.Dropout(dropout)
self.act_fn = nn.functional.relu
def forward(self, features):
hidden = self.in_linear(features)
hidden = self.drop(hidden)
hidden = self.act_fn(hidden)
predicted = self.out_linear(hidden)
return predicted
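# Minimal shape sanity check (illustrative addition, not part of the original file;
# the dimensions below are arbitrary placeholders).
if __name__ == '__main__':
    feats = torch.randn(4, 100, 240)  # (batch, frames, feature_dim)
    bank = ConvBank(240, 41, kernels=[3, 5, 7], cnn_size=32, hidden_size=64, dropout=0.1)
    mlp = Framelevel1Hidden(240, 41, hidden_size=256, dropout=0.1)
    print(bank(feats).shape, mlp(feats).shape)  # both -> torch.Size([4, 100, 41])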
| 35.370968 | 103 | 0.574099 | [
"MIT"
] | AmirHussein96/Self-Supervised-Speech-Pretraining-and-Representation-Learning | downstream/libri_phone/model.py | 2,193 | Python |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyBiopandas(PythonPackage):
"""Working with molecular structures in pandas DataFrames"""
homepage = "https://rasbt.github.io/biopandas"
pypi = "biopandas/biopandas-0.2.5.tar.gz"
git = "https://github.com/rasbt/biopandas.git"
# Note that the source package on PyPi is broken as it
# is missing the requirements.txt so we have to download
# from github
version('0.2.5', branch="v0.2.5")
depends_on('python@3.5:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('py-pandas@0.24.2:', type=('build', 'run'))
depends_on('py-numpy@1.16.2:', type=('build', 'run'))
| 33.653846 | 73 | 0.674286 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | Bambi/spack | var/spack/repos/builtin/packages/py-biopandas/package.py | 875 | Python |
from pprint import pprint
import re
import pexpect
import yaml
def cisco_send_show_command(host, username, password, enable_pass, command):
with pexpect.spawn(f"ssh {username}@{host}", encoding="utf-8") as ssh:
ssh.expect("[Pp]assword")
ssh.sendline(password)
ssh.expect(">")
ssh.sendline("enable")
ssh.expect("Password")
ssh.sendline(enable_pass)
ssh.expect("#")
output = ""
ssh.sendline(command)
while True:
match = ssh.expect(["#", "--More--", pexpect.TIMEOUT], timeout=5)
page = ssh.before
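# The device erases the "--More--" prompt with runs of spaces and backspaces (\x08)
# before printing the next page; strip those runs so the captured output keeps
# clean line breaks.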
page = re.sub(r" +\x08+ +\x08+", "\n", page)
output += page
if match == 0:
break
elif match == 1:
ssh.send(" ")
else:
break
return output.replace("\r\n", "\n")
if __name__ == "__main__":
with open("devices.yaml") as f:
devices = yaml.safe_load(f)
r1 = devices[0]
out = cisco_send_show_command(**r1, command="sh run")
with open("result_r1.txt", "w") as f:
f.write(out)
| 26.785714 | 77 | 0.532444 | [
"MIT"
] | levs72/pyneng-examples | ssh_telnet/pexpect/ex06_ssh_pexpect_show_with_paging.py | 1,125 | Python |
import argparse
import shutil
import sys
import time
from datetime import timedelta
from pathlib import Path
import torch
from openunreid.apis import BaseRunner, test_reid
from openunreid.core.solvers import build_lr_scheduler, build_optimizer
from openunreid.data import build_test_dataloader, build_train_dataloader
from openunreid.models import build_model
from openunreid.models.losses import build_loss
from openunreid.utils.config import (
cfg,
cfg_from_list,
cfg_from_yaml_file,
log_config_to_file,
)
from openunreid.utils.dist_utils import init_dist, synchronize
from openunreid.utils.file_utils import mkdir_if_missing
from openunreid.utils.logger import Logger
def parge_config():
parser = argparse.ArgumentParser(description="UDA_TP training")
parser.add_argument("config", help="train config file path")
parser.add_argument(
"--work-dir", help="the dir to save logs and models", default=""
)
parser.add_argument("--resume-from", help="the checkpoint file to resume from")
parser.add_argument(
"--launcher",
type=str,
choices=["none", "pytorch", "slurm"],
default="none",
help="job launcher",
)
parser.add_argument("--tcp-port", type=str, default="5017")
parser.add_argument(
"--set",
dest="set_cfgs",
default=None,
nargs=argparse.REMAINDER,
help="set extra config keys if needed",
)
args = parser.parse_args()
cfg_from_yaml_file(args.config, cfg)
cfg.launcher = args.launcher
cfg.tcp_port = args.tcp_port
if not args.work_dir:
args.work_dir = Path(args.config).stem
cfg.work_dir = cfg.LOGS_ROOT / args.work_dir
mkdir_if_missing(cfg.work_dir)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs, cfg)
shutil.copy(args.config, cfg.work_dir / "config.yaml")
return args, cfg
def main():
start_time = time.monotonic()
# init distributed training
args, cfg = parge_config()
dist = init_dist(cfg)
synchronize()
# init logging file
logger = Logger(cfg.work_dir / "log.txt", debug=False)
sys.stdout = logger
print("==========\nArgs:{}\n==========".format(args))
log_config_to_file(cfg)
# build train loader
train_loader, train_sets = build_train_dataloader(cfg)
# build model
model = build_model(cfg, 0, init=cfg.MODEL.source_pretrained)
model.cuda()
if dist:
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[cfg.gpu],
output_device=cfg.gpu,
find_unused_parameters=True,
)
elif cfg.total_gpus > 1:
model = torch.nn.DataParallel(model)
# build optimizer
optimizer = build_optimizer([model,], **cfg.TRAIN.OPTIM)
# build lr_scheduler
if cfg.TRAIN.SCHEDULER.lr_scheduler is not None:
lr_scheduler = build_lr_scheduler(optimizer, **cfg.TRAIN.SCHEDULER)
else:
lr_scheduler = None
# build loss functions
criterions = build_loss(cfg.TRAIN.LOSS, triplet_key="feat", cuda=True)
# build runner
runner = BaseRunner(
cfg,
model,
optimizer,
criterions,
train_loader,
train_sets=train_sets,
lr_scheduler=lr_scheduler,
meter_formats={"Time": ":.3f"},
reset_optim=False,
)
# resume
if args.resume_from:
runner.resume(args.resume_from)
# start training
runner.run()
# load the best model
runner.resume(cfg.work_dir / "model_best.pth")
# final testing
test_loaders, queries, galleries = build_test_dataloader(cfg)
for i, (loader, query, gallery) in enumerate(zip(test_loaders, queries, galleries)):
cmc, mAP = test_reid(
cfg, model, loader, query, gallery, dataset_name=cfg.TEST.datasets[i]
)
# print time
end_time = time.monotonic()
print("Total running time: ", timedelta(seconds=end_time - start_time))
if __name__ == "__main__":
main()
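# Example invocation (illustrative; the config path and work dir are hypothetical):
#   python tools/UDA_TP/main.py configs/uda_tp.yaml --work-dir uda_tp_run --launcher none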
| 27.813793 | 88 | 0.666997 | [
"Apache-2.0"
] | gwanglee/OpenUnReID | tools/UDA_TP/main.py | 4,033 | Python |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the sort wrapper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.framework.python.ops import sort_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
class SortTest(test.TestCase):
def testRandom_lowDimensionality(self):
self._testRandom_lowDimensionality(negative_axis=False)
def testRandom_lowDimensionality_negative(self):
self._testRandom_lowDimensionality(negative_axis=True)
def _testRandom_lowDimensionality(self, negative_axis):
np.random.seed(42)
for _ in range(20):
rank = np.random.randint(1, 3)
shape = [np.random.randint(0, 20) for _ in range(rank)]
arr = np.random.random(shape)
sort_axis = np.random.choice(rank)
if negative_axis:
sort_axis = -1 - sort_axis
with self.cached_session():
self.assertAllEqual(
np.sort(arr, axis=sort_axis),
sort_ops.sort(constant_op.constant(arr), axis=sort_axis).eval())
def testRandom_highDimensionality(self):
np.random.seed(100)
for _ in range(20):
rank = np.random.randint(5, 15)
shape = [np.random.randint(1, 4) for _ in range(rank)]
arr = np.random.random(shape)
sort_axis = np.random.choice(rank)
with self.cached_session():
self.assertAllEqual(
np.sort(arr, axis=sort_axis),
sort_ops.sort(constant_op.constant(arr), axis=sort_axis).eval())
def testScalar(self):
# Create an empty scalar where the static shape is unknown.
zeros_length_1 = array_ops.zeros(
random_ops.random_uniform([1], minval=0, maxval=1, dtype=dtypes.int32),
dtype=dtypes.int32)
scalar = array_ops.zeros(zeros_length_1)
sort = sort_ops.sort(scalar)
with self.cached_session():
with self.assertRaises(errors.InvalidArgumentError):
sort.eval()
def testNegativeOutOfBounds_staticShape(self):
arr = constant_op.constant([3, 4, 5])
with self.assertRaises(ValueError):
sort_ops.sort(arr, axis=-4)
def testDescending(self):
arr = np.random.random((10, 5, 5))
with self.cached_session():
self.assertAllEqual(
np.sort(arr, axis=0)[::-1],
sort_ops.sort(
constant_op.constant(arr),
axis=0,
direction='DESCENDING').eval())
def testSort_staticallyKnownRank_constantTransposition(self):
# The transposition array should be a constant if the rank of "values" is
# statically known.
tensor = random_ops.random_uniform(
# Rank is statically known to be 5, but the dimension lengths are not
# known.
random_ops.random_uniform(
shape=(5,), minval=0, maxval=10, dtype=dtypes.int32))
sort_ops.sort(tensor, axis=1)
transposition = (
ops.get_default_graph().get_tensor_by_name('sort/transposition:0'))
self.assertFalse(tensor_util.constant_value(transposition) is None)
self.assertAllEqual(
# Swaps "1" and "4" to put "1" at the end.
tensor_util.constant_value(transposition),
[0, 4, 2, 3, 1])
def testArgsort_1d(self):
arr = np.random.random(42)
with self.cached_session():
self.assertAllEqual(
np.sort(arr),
array_ops.gather(arr, sort_ops.argsort(arr)).eval())
def testArgsort(self):
arr = np.random.random((5, 6, 7, 8))
for axis in range(4):
with self.cached_session():
self.assertAllEqual(
np.argsort(arr, axis=axis),
sort_ops.argsort(arr, axis=axis).eval())
if __name__ == '__main__':
test.main()
| 35.823077 | 80 | 0.684131 | [
"Apache-2.0"
] | 2017qiuju/tensorflow | tensorflow/contrib/framework/python/ops/sort_ops_test.py | 4,657 | Python |
import gflags
import sys
import torch
from spinn.util.misc import recursively_set_device
FLAGS = gflags.FLAGS
def convert(inpt, outp, gpu=-1):
ckpt = torch.load(inpt)
if gpu < 0:
ckpt['model_state_dict'] = {k: v.cpu() for k, v in ckpt['model_state_dict'].items()}
else:
ckpt['model_state_dict'] = {k: v.cuda() for k, v in ckpt['model_state_dict'].items()}
ckpt['optimizer_state_dict'] = recursively_set_device(ckpt['optimizer_state_dict'], gpu)
torch.save(ckpt, outp)
if __name__ == '__main__':
gflags.DEFINE_string("inpt", None, "")
gflags.DEFINE_string("outp", None, "")
gflags.DEFINE_integer("gpu", -1, "")
FLAGS(sys.argv)
convert(FLAGS.inpt, FLAGS.outp, FLAGS.gpu)
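# Example invocation (illustrative; checkpoint paths are hypothetical):
#   python scripts/convert_ckpt.py --inpt model_gpu.ckpt --outp model_cpu.ckpt --gpu -1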
| 26 | 93 | 0.666209 | [
"Apache-2.0"
] | NYU-CDS-Capstone-Project/Betelgeuse_SPINN | scripts/convert_ckpt.py | 728 | Python |