id (int64, 0-300k) | label (string, 1-74 chars) | text (string, 4k-8k chars) |
---|---|---|
299,800 | test bf16 storage legalize | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import tvm
import tvm.script
from tvm.script import tir as T
def get_before():
@tvm.script.ir_module
class Before:
@T.prim_func
def main(
Aptr: T.handle("bfloat16"), Bptr: T.handle("bfloat16"), Dptr: T.handle("bfloat16")
):
T.func_attr({"global_symbol": "main"})
A = T.decl_buffer((100,), "bfloat16", data=Aptr)
B = T.decl_buffer((100,), "bfloat16", data=Bptr)
D = T.decl_buffer((100,), "bfloat16", data=Dptr)
C = T.decl_buffer((100,), "bfloat16")
for i in T.grid(100):
C[i] = A[i] + B[i]
D[i] = T.exp(C[i])
return Before
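# Helpers mirroring the legalized IR: a bfloat16 value is carried in the upper
# 16 bits of a float32, so widening is a reinterpret plus a 16-bit left shift.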
def u16tof32(v):
uint32_v = v.astype("uint32")
uint32_v = uint32_v << tvm.tir.const(16, "uint32")
return T.reinterpret("float32", uint32_v)
def bf16tof32(v):
return u16tof32(T.reinterpret("uint16", v))
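# float32 -> bfloat16 bit pattern with round-to-nearest-even: add the lowest
# retained bit plus 0x7FFF before truncating away the low 16 bits.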
def f32tou16(v):
uint32_v = T.reinterpret("uint32", v)
rounding_bias = (uint32_v >> tvm.tir.const(16, "uint32")) & tvm.tir.const(1, "uint32")
rounding_bias += tvm.tir.const(0x7FFF, "uint32")
uint32_v = uint32_v + rounding_bias
return (uint32_v >> tvm.tir.const(16, "uint32")).astype("uint16")
def f32tobf16(v):
return T.reinterpret("bfloat16", f32tou16(v))
def get_after_compute_legalize():
@tvm.script.ir_module
class After:
@T.prim_func
def main(
Aptr: T.handle("bfloat16"), Bptr: T.handle("bfloat16"), Dptr: T.handle("bfloat16")
):
T.func_attr({"global_symbol": "main"})
A = T.decl_buffer((100,), "bfloat16", data=Aptr)
B = T.decl_buffer((100,), "bfloat16", data=Bptr)
D = T.decl_buffer((100,), "bfloat16", data=Dptr)
C = T.decl_buffer((100,), "float32")
for i in T.grid(100):
C[i] = bf16tof32(A[i]) + bf16tof32(B[i])
D[i] = f32tobf16(T.exp(C[i]))
return After
def get_after_storage_legalize():
@tvm.script.ir_module
class After:
@T.prim_func
def main(Aptr: T.handle("uint16"), Bptr: T.handle("uint16"), Dptr: T.handle("uint16")):
T.func_attr({"global_symbol": "main"})
A = T.decl_buffer((100,), "uint16", data=Aptr)
B = T.decl_buffer((100,), "uint16", data=Bptr)
D = T.decl_buffer((100,), "uint16", data=Dptr)
C = T.decl_buffer((100,), "float32")
for i in T.grid(100):
C[i] = u16tof32(A[i]) + u16tof32(B[i])
D[i] = f32tou16(T.exp(C[i]))
return After
def test_bf16_compute_legalize():
before = get_before()
expected = get_after_compute_legalize()
    # run the transform twice to ensure we can handle
    # repeated application of this optimization
after = tvm.tir.transform.BF16ComputeLegalize()(before)
after = tvm.tir.transform.BF16ComputeLegalize()(after)
tvm.ir.assert_structural_equal(after, expected)
def METHOD_NAME():
before = get_after_compute_legalize()
after = tvm.tir.transform.BF16StorageLegalize()(before)
expected = get_after_storage_legalize()
tvm.ir.assert_structural_equal(after, expected)
def test_bf16_storage_scope():
def get_before():
@tvm.script.ir_module
class Before:
@T.prim_func
def main(
Aptr: T.handle("bfloat16", storage_scope="shared"),
Bptr: T.handle("bfloat16", storage_scope="local"),
Dptr: T.handle("bfloat16"),
):
T.func_attr({"global_symbol": "main"})
A = T.decl_buffer((100,), "bfloat16", data=Aptr)
B = T.decl_buffer((100,), "bfloat16", data=Bptr)
D = T.decl_buffer((100,), "bfloat16", data=Dptr)
C = T.decl_buffer((100,), "bfloat16")
for i in T.grid(100):
C[i] = A[i] + B[i]
D[i] = T.exp(C[i])
return Before
def after_compute_legalize():
@tvm.script.ir_module
class After:
@T.prim_func
def main(
Aptr: T.handle("bfloat16", storage_scope="shared"),
Bptr: T.handle("bfloat16", storage_scope="local"),
Dptr: T.handle("bfloat16"),
):
T.func_attr({"global_symbol": "main"})
A = T.decl_buffer((100,), "bfloat16", data=Aptr)
B = T.decl_buffer((100,), "bfloat16", data=Bptr)
D = T.decl_buffer((100,), "bfloat16", data=Dptr)
C = T.decl_buffer((100,), "float32")
for i in T.grid(100):
C[i] = bf16tof32(A[i]) + bf16tof32(B[i])
D[i] = f32tobf16(T.exp(C[i]))
return After
def after_storage_legalize():
@tvm.script.ir_module
class After:
@T.prim_func
def main(
Aptr: T.handle("uint16", storage_scope="shared"),
Bptr: T.handle("uint16", storage_scope="local"),
Dptr: T.handle("uint16"),
):
T.func_attr({"global_symbol": "main"})
A = T.decl_buffer((100,), "uint16", data=Aptr)
B = T.decl_buffer((100,), "uint16", data=Bptr)
D = T.decl_buffer((100,), "uint16", data=Dptr)
C = T.decl_buffer((100,), "float32")
for i in T.grid(100):
C[i] = u16tof32(A[i]) + u16tof32(B[i])
D[i] = f32tou16(T.exp(C[i]))
return After
before = get_before()
after_compute = tvm.tir.transform.BF16ComputeLegalize()(before)
after_storage = tvm.tir.transform.BF16StorageLegalize()(after_compute)
tvm.ir.assert_structural_equal(after_compute, after_compute_legalize())
tvm.ir.assert_structural_equal(after_storage, after_storage_legalize())
if __name__ == "__main__":
METHOD_NAME()
test_bf16_storage_scope() |
299,801 | test suite | """Tests for distutils.command.check."""
import os
import textwrap
import unittest
from test.support import run_unittest
from distutils.command.check import check, HAS_DOCUTILS
from distutils.tests import support
from distutils.errors import DistutilsSetupError
try:
import pygments
except ImportError:
pygments = None
HERE = os.path.dirname(__file__)
class CheckTestCase(support.LoggingSilencer,
support.TempdirManager,
unittest.TestCase):
def _run(self, metadata=None, cwd=None, **options):
if metadata is None:
metadata = {}
if cwd is not None:
old_dir = os.getcwd()
os.chdir(cwd)
pkg_info, dist = self.create_dist(**metadata)
cmd = check(dist)
cmd.initialize_options()
for name, value in options.items():
setattr(cmd, name, value)
cmd.ensure_finalized()
cmd.run()
if cwd is not None:
os.chdir(old_dir)
return cmd
def test_check_metadata(self):
# let's run the command with no metadata at all
# by default, check is checking the metadata
# should have some warnings
cmd = self._run()
self.assertEqual(cmd._warnings, 2)
# now let's add the required fields
# and run it again, to make sure we don't get
# any warning anymore
metadata = {'url': 'xxx', 'author': 'xxx',
'author_email': 'xxx',
'name': 'xxx', 'version': 'xxx'}
cmd = self._run(metadata)
self.assertEqual(cmd._warnings, 0)
        # now with the strict mode, we should
        # get an error if any metadata is missing
self.assertRaises(DistutilsSetupError, self._run, {}, **{'strict': 1})
# and of course, no error when all metadata are present
cmd = self._run(metadata, strict=1)
self.assertEqual(cmd._warnings, 0)
# now a test with non-ASCII characters
metadata = {'url': 'xxx', 'author': '\u00c9ric',
'author_email': 'xxx', 'name': 'xxx',
'version': 'xxx',
'description': 'Something about esszet \u00df',
'long_description': 'More things about esszet \u00df'}
cmd = self._run(metadata)
self.assertEqual(cmd._warnings, 0)
@unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
def test_check_document(self):
pkg_info, dist = self.create_dist()
cmd = check(dist)
# let's see if it detects broken rest
broken_rest = 'title\n===\n\ntest'
msgs = cmd._check_rst_data(broken_rest)
self.assertEqual(len(msgs), 1)
# and non-broken rest
rest = 'title\n=====\n\ntest'
msgs = cmd._check_rst_data(rest)
self.assertEqual(len(msgs), 0)
@unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
def test_check_restructuredtext(self):
# let's see if it detects broken rest in long_description
broken_rest = 'title\n===\n\ntest'
pkg_info, dist = self.create_dist(long_description=broken_rest)
cmd = check(dist)
cmd.check_restructuredtext()
self.assertEqual(cmd._warnings, 1)
# let's see if we have an error with strict=1
metadata = {'url': 'xxx', 'author': 'xxx',
'author_email': 'xxx',
'name': 'xxx', 'version': 'xxx',
'long_description': broken_rest}
self.assertRaises(DistutilsSetupError, self._run, metadata,
**{'strict': 1, 'restructuredtext': 1})
# and non-broken rest, including a non-ASCII character to test #12114
metadata['long_description'] = 'title\n=====\n\ntest \u00df'
cmd = self._run(metadata, strict=1, restructuredtext=1)
self.assertEqual(cmd._warnings, 0)
# check that includes work to test #31292
metadata['long_description'] = 'title\n=====\n\n.. include:: includetest.rst'
cmd = self._run(metadata, cwd=HERE, strict=1, restructuredtext=1)
self.assertEqual(cmd._warnings, 0)
@unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
def test_check_restructuredtext_with_syntax_highlight(self):
# Don't fail if there is a `code` or `code-block` directive
example_rst_docs = []
example_rst_docs.append(textwrap.dedent("""\
Here's some code:
.. code:: python
def foo():
pass
"""))
example_rst_docs.append(textwrap.dedent("""\
Here's some code:
.. code-block:: python
def foo():
pass
"""))
for rest_with_code in example_rst_docs:
pkg_info, dist = self.create_dist(long_description=rest_with_code)
cmd = check(dist)
cmd.check_restructuredtext()
msgs = cmd._check_rst_data(rest_with_code)
if pygments is not None:
self.assertEqual(len(msgs), 0)
else:
self.assertEqual(len(msgs), 1)
self.assertEqual(
str(msgs[0][1]),
'Cannot analyze code. Pygments package not found.'
)
def test_check_all(self):
metadata = {'url': 'xxx', 'author': 'xxx'}
self.assertRaises(DistutilsSetupError, self._run,
{}, **{'strict': 1,
'restructuredtext': 1})
def METHOD_NAME():
return unittest.makeSuite(CheckTestCase)
if __name__ == "__main__":
run_unittest(METHOD_NAME()) |
299,802 | enumerate all possible 1d sharding | import operator
from copy import deepcopy
from functools import reduce
from typing import Dict
import torch
from colossalai.tensor.sharding_spec import ShardingSpec
__all__ = [
'transpose_partition_dim', 'update_partition_dim', 'enumerate_all_possible_1d_sharding',
'enumerate_all_possible_2d_sharding', 'generate_sharding_size'
]
def transpose_partition_dim(sharding_spec: ShardingSpec, dim1: int, dim2: int) -> ShardingSpec:
"""
Switch the sharding mesh dimensions for two tensor dimensions. This operation is in-place.
Args:
        sharding_spec (ShardingSpec): the sharding spec for which partition dims are switched
dim1 (int): the tensor dimension to switch
dim2 (int): the tensor dimension to switch
"""
assert len(sharding_spec.entire_shape) >= 2, \
'The entire_shape of the sharding spec must have at least 2 dimensions'
dim_partition_dict = sharding_spec.dim_partition_dict
# transpose the dim partition
dim1_partition = dim_partition_dict.pop(dim1, None)
dim2_partition = dim_partition_dict.pop(dim2, None)
if dim1_partition:
dim_partition_dict[dim2] = dim1_partition
if dim2_partition:
dim_partition_dict[dim1] = dim2_partition
# get the transposed shape
new_shape = list(sharding_spec.entire_shape[:])
new_shape[dim2], new_shape[dim1] = new_shape[dim1], new_shape[dim2]
new_shape = torch.Size(new_shape)
# re-init the sharding spec
sharding_spec.__init__(sharding_spec.device_mesh, new_shape, dim_partition_dict)
return sharding_spec
def update_partition_dim(sharding_spec: ShardingSpec,
dim_mapping: Dict[int, int],
physical_shape: torch.Size,
inplace: bool = False):
"""
This method is used to update the partition dim dict from the logical one to the physical one.
Args:
sharding_spec (ShardingSpec): the sharding spec for which partition dims are updated
dim_mapping (Dict[int, int]): the mapping from the logical tensor dimension to the physical tensor dimension
        physical_shape (torch.Size): the physical shape for the tensor
        inplace (bool): whether to update the given sharding spec in place; if False, a deep copy is updated and returned
"""
if inplace:
current_sharding_spec = sharding_spec
else:
current_sharding_spec = deepcopy(sharding_spec)
old_dim_partition_dict = current_sharding_spec.dim_partition_dict
new_dim_partition_dict = {}
# assign new dim
for old_dim, new_dim in dim_mapping.items():
mesh_dims = old_dim_partition_dict.pop(old_dim)
new_dim_partition_dict[new_dim] = mesh_dims
for tensor_dim, mesh_dims in old_dim_partition_dict.items():
if tensor_dim in new_dim_partition_dict:
raise KeyError(f"There are duplicated entries for the tensor sharding dimension {tensor_dim}")
else:
new_dim_partition_dict[tensor_dim] = mesh_dims
# update sharding spec
current_sharding_spec.__init__(device_mesh=sharding_spec.device_mesh,
entire_shape=physical_shape,
dim_partition_dict=new_dim_partition_dict)
return current_sharding_spec
def enumerate_all_possible_2d_sharding(mesh_dim_0, mesh_dim_1, dim_size):
dim_partition_list = []
# enumerate all the 2D sharding cases
for i in range(dim_size):
for j in range(i + 1, dim_size):
dim_partition_dict_0 = {i: [mesh_dim_0], j: [mesh_dim_1]}
dim_partition_dict_1 = {i: [mesh_dim_1], j: [mesh_dim_0]}
dim_partition_list.append(dim_partition_dict_0)
dim_partition_list.append(dim_partition_dict_1)
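    # also cover flattening both mesh dims onto a single tensor dimension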
for i in range(dim_size):
dim_partition_dict_flatten = {i: [mesh_dim_0, mesh_dim_1]}
dim_partition_list.append(dim_partition_dict_flatten)
return dim_partition_list
def METHOD_NAME(mesh_dim_0, dim_size):
dim_partition_list = []
# enumerate all the 1D sharding cases
for i in range(dim_size):
dim_partition_dict_0 = {i: [mesh_dim_0]}
dim_partition_list.append(dim_partition_dict_0)
return dim_partition_list
def generate_sharding_size(dim_partition_dict, device_mesh):
total_sharding_size = 1
for mesh_dim_list in dim_partition_dict.values():
mesh_dim_sharding_size = [device_mesh.shape[mesh_dim] for mesh_dim in mesh_dim_list]
sharding_size = reduce(operator.mul, mesh_dim_sharding_size)
total_sharding_size *= sharding_size
return total_sharding_size |
299,803 | list disaster recovery config keys output | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'ListDisasterRecoveryConfigKeysResult',
'AwaitableListDisasterRecoveryConfigKeysResult',
'list_disaster_recovery_config_keys',
'list_disaster_recovery_config_keys_output',
]
@pulumi.output_type
class ListDisasterRecoveryConfigKeysResult:
"""
Namespace/EventHub Connection String
"""
def __init__(__self__, alias_primary_connection_string=None, alias_secondary_connection_string=None, key_name=None, primary_connection_string=None, primary_key=None, secondary_connection_string=None, secondary_key=None):
if alias_primary_connection_string and not isinstance(alias_primary_connection_string, str):
raise TypeError("Expected argument 'alias_primary_connection_string' to be a str")
pulumi.set(__self__, "alias_primary_connection_string", alias_primary_connection_string)
if alias_secondary_connection_string and not isinstance(alias_secondary_connection_string, str):
raise TypeError("Expected argument 'alias_secondary_connection_string' to be a str")
pulumi.set(__self__, "alias_secondary_connection_string", alias_secondary_connection_string)
if key_name and not isinstance(key_name, str):
raise TypeError("Expected argument 'key_name' to be a str")
pulumi.set(__self__, "key_name", key_name)
if primary_connection_string and not isinstance(primary_connection_string, str):
raise TypeError("Expected argument 'primary_connection_string' to be a str")
pulumi.set(__self__, "primary_connection_string", primary_connection_string)
if primary_key and not isinstance(primary_key, str):
raise TypeError("Expected argument 'primary_key' to be a str")
pulumi.set(__self__, "primary_key", primary_key)
if secondary_connection_string and not isinstance(secondary_connection_string, str):
raise TypeError("Expected argument 'secondary_connection_string' to be a str")
pulumi.set(__self__, "secondary_connection_string", secondary_connection_string)
if secondary_key and not isinstance(secondary_key, str):
raise TypeError("Expected argument 'secondary_key' to be a str")
pulumi.set(__self__, "secondary_key", secondary_key)
@property
@pulumi.getter(name="aliasPrimaryConnectionString")
def alias_primary_connection_string(self) -> str:
"""
Primary connection string of the alias if GEO DR is enabled
"""
return pulumi.get(self, "alias_primary_connection_string")
@property
@pulumi.getter(name="aliasSecondaryConnectionString")
def alias_secondary_connection_string(self) -> str:
"""
Secondary connection string of the alias if GEO DR is enabled
"""
return pulumi.get(self, "alias_secondary_connection_string")
@property
@pulumi.getter(name="keyName")
def key_name(self) -> str:
"""
A string that describes the AuthorizationRule.
"""
return pulumi.get(self, "key_name")
@property
@pulumi.getter(name="primaryConnectionString")
def primary_connection_string(self) -> str:
"""
Primary connection string of the created namespace AuthorizationRule.
"""
return pulumi.get(self, "primary_connection_string")
@property
@pulumi.getter(name="primaryKey")
def primary_key(self) -> str:
"""
A base64-encoded 256-bit primary key for signing and validating the SAS token.
"""
return pulumi.get(self, "primary_key")
@property
@pulumi.getter(name="secondaryConnectionString")
def secondary_connection_string(self) -> str:
"""
Secondary connection string of the created namespace AuthorizationRule.
"""
return pulumi.get(self, "secondary_connection_string")
@property
@pulumi.getter(name="secondaryKey")
def secondary_key(self) -> str:
"""
        A base64-encoded 256-bit secondary key for signing and validating the SAS token.
"""
return pulumi.get(self, "secondary_key")
class AwaitableListDisasterRecoveryConfigKeysResult(ListDisasterRecoveryConfigKeysResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return ListDisasterRecoveryConfigKeysResult(
alias_primary_connection_string=self.alias_primary_connection_string,
alias_secondary_connection_string=self.alias_secondary_connection_string,
key_name=self.key_name,
primary_connection_string=self.primary_connection_string,
primary_key=self.primary_key,
secondary_connection_string=self.secondary_connection_string,
secondary_key=self.secondary_key)
def list_disaster_recovery_config_keys(alias: Optional[str] = None,
authorization_rule_name: Optional[str] = None,
namespace_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableListDisasterRecoveryConfigKeysResult:
"""
Gets the primary and secondary connection strings for the Namespace.
:param str alias: The Disaster Recovery configuration name
:param str authorization_rule_name: The authorization rule name.
:param str namespace_name: The Namespace name
:param str resource_group_name: Name of the resource group within the azure subscription.
"""
__args__ = dict()
__args__['alias'] = alias
__args__['authorizationRuleName'] = authorization_rule_name
__args__['namespaceName'] = namespace_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:eventhub/v20230101preview:listDisasterRecoveryConfigKeys', __args__, opts=opts, typ=ListDisasterRecoveryConfigKeysResult).value
return AwaitableListDisasterRecoveryConfigKeysResult(
alias_primary_connection_string=pulumi.get(__ret__, 'alias_primary_connection_string'),
alias_secondary_connection_string=pulumi.get(__ret__, 'alias_secondary_connection_string'),
key_name=pulumi.get(__ret__, 'key_name'),
primary_connection_string=pulumi.get(__ret__, 'primary_connection_string'),
primary_key=pulumi.get(__ret__, 'primary_key'),
secondary_connection_string=pulumi.get(__ret__, 'secondary_connection_string'),
secondary_key=pulumi.get(__ret__, 'secondary_key'))
@_utilities.lift_output_func(list_disaster_recovery_config_keys)
def METHOD_NAME(alias: Optional[pulumi.Input[str]] = None,
authorization_rule_name: Optional[pulumi.Input[str]] = None,
namespace_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[ListDisasterRecoveryConfigKeysResult]:
"""
Gets the primary and secondary connection strings for the Namespace.
:param str alias: The Disaster Recovery configuration name
:param str authorization_rule_name: The authorization rule name.
:param str namespace_name: The Namespace name
:param str resource_group_name: Name of the resource group within the azure subscription.
"""
... |
299,804 | test set dtype | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-function-docstring,missing-module-docstring
import pytest
import tvm
import tvm.testing
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import (
assert_structural_equal_ignore_global_symbol,
verify_trace_roundtrip,
)
# fmt: off
# pylint: disable=no-member,invalid-name,unused-variable,unexpected-keyword-arg
@T.prim_func
def element_wise(A: T.Buffer((128, 128), "float32"), C: T.Buffer((128, 128), "float32")) -> None:
B = T.alloc_buffer((128, 128), dtype="float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B[vi, vj] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
C[vi, vj] = B[vi, vj] + 1.0
@T.prim_func
def element_wise_set_dtype(A: T.Buffer((128, 128), "float32"), C: T.Buffer((128, 128), "float32")):
B = T.alloc_buffer((128, 128), "float16")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(A[vi, vj])
T.writes(B[vi, vj])
B[vi, vj] = T.cast(A[vi, vj] * 2.0, "float16")
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(B[vi, vj])
T.writes(C[vi, vj])
C[vi, vj] = T.cast(B[vi, vj], "float32") + 1.0
@T.prim_func
def element_wise_subregion_match(A: T.Buffer((128, 128), "float32"), C: T.Buffer((128, 128), "float32")) -> None:
B = T.alloc_buffer((128, 128), dtype="float32")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
B_subregion0 = T.match_buffer(B[vi, vj], [], offset_factor=1)
B_subregion0[()] = A[vi, vj] * 2.0
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
B_subregion1 = T.match_buffer(B[vi, vj], [], offset_factor=1)
C[vi, vj] = B_subregion1[()] + 1.0
@T.prim_func
def element_wise_subregion_match_set_dtype(A: T.Buffer((128, 128), "float32"), C: T.Buffer((128, 128), "float32")) -> None:
B = T.alloc_buffer((128, 128), "float16")
for i, j in T.grid(128, 128):
with T.block("B"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(A[vi, vj])
T.writes(B[vi, vj])
B_subregion0 = T.match_buffer(B[vi, vj], (), "float16", offset_factor=1)
B_subregion0[()] = T.cast(A[vi, vj] * 2.0, "float16")
for i, j in T.grid(128, 128):
with T.block("C"):
vi, vj = T.axis.remap("SS", [i, j])
T.reads(B[vi, vj])
T.writes(C[vi, vj])
B_subregion1 = T.match_buffer(B[vi, vj], (), "float16", offset_factor=1)
C[vi, vj] = T.cast(B_subregion1[()], "float32") + 1.0
use_block_name = tvm.testing.parameter(by_dict={"block_obj": False, "block_name": True})
def METHOD_NAME(use_block_name):
func = element_wise
sch = tir.Schedule(func, debug_mask="all")
sch.unsafe_set_dtype("B" if use_block_name else sch.get_block("B"), 0, "float16")
assert_structural_equal_ignore_global_symbol(element_wise_set_dtype, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=func)
def test_set_dtype_fail_on_output_buffer(use_block_name):
func = element_wise
sch = tir.Schedule(func, debug_mask='all')
with pytest.raises(tvm.tir.ScheduleError):
sch.unsafe_set_dtype('C' if use_block_name else sch.get_block("C"), 0, "float16")
def test_set_dtype_fail_on_index_out_of_bound():
func = element_wise
sch = tir.Schedule(func, debug_mask='all')
with pytest.raises(tvm.tir.ScheduleError):
sch.unsafe_set_dtype(sch.get_block("B"), 1, "float64")
with pytest.raises(tvm.tir.ScheduleError):
sch.unsafe_set_dtype(sch.get_block("B"), -1, "float64")
def test_set_dtype_subregion():
func = element_wise_subregion_match
sch = tir.Schedule(func, debug_mask='all')
sch.unsafe_set_dtype(sch.get_block("B"), 0, "float16")
assert_structural_equal_ignore_global_symbol(element_wise_subregion_match_set_dtype, sch.mod["main"])
verify_trace_roundtrip(sch=sch, mod=func)
if __name__ == "__main__":
tvm.testing.main() |
299,805 | get vmware reg | '''
Underlying platform implementation for kernel debugging
with vmware gdbserver.
Msv1_0SubAuthenticationRoutine
VMWare config options...
debugStub.listen.guest64 = "TRUE" # ends up on port 8864 (or next avail)
# 32 bit target.... ( defaults to port 8832 )
debugStub.listen.guest32 = "TRUE"
debugStub.listen.guest32.remote = "TRUE" # bind to 0.0.0.0 rather than 127.0.0.1
debugStub.hideBreakpoints = "TRUE" # Enable breakpoints
# 64 bit target.... ( defaults to port 8864 )
debugStub.listen.guest64 = "TRUE"
debugStub.listen.guest64.remote = "TRUE" # bind to 0.0.0.0 rather than 127.0.0.1
debugStub.hideBreakpoints = "TRUE" # Enable breakpoints
'''
import logging
import PE
import vtrace
import envi.bits as e_bits
import envi.symstore.resolver as e_resolv
import envi.symstore.symcache as e_symcache
import vtrace.archs.i386 as vt_i386
import vtrace.platforms.base as vt_base
import vtrace.platforms.win32 as vt_win32
import vtrace.platforms.winkern as vt_winkern
import vtrace.platforms.gdbstub as vt_gdbstub
logger = logging.getLogger(__name__)
class VMWareMixin(vt_gdbstub.GdbStubMixin):
def __init__(self, host=None, port=None):
vt_gdbstub.GdbStubMixin.__init__(self, host=host, port=port)
self.bigmask = e_bits.u_maxes[ self.getPointerSize() ]
class VMWare32WindowsTrace(
vtrace.Trace,
VMWareMixin,
vt_i386.i386Mixin,
vt_base.TracerBase,
):
def __init__(self, host=None, port=None):
vtrace.Trace.__init__(self, archname='i386')
vt_base.TracerBase.__init__(self)
vt_i386.i386Mixin.__init__(self)
VMWareMixin.__init__(self, host=host, port=port)
self.setMeta('Format','pe')
self.setMeta('Platform','winkern')
self._break_after_bp = False # we stop directly on the bp addr
def METHOD_NAME(self, rname):
'''
Use VMWare's monitor extension to get a register we wouldn't
normally have...
'''
#fs 0x30 base 0xffdff000 limit 0x00001fff type 0x3 s 1 dpl 0 p 1 db 1
fsstr = self._monitorCommand('r %s' % rname)
fsparts = fsstr.split()
return int(fsparts[3], 16)
def _gdbJustAttached(self):
# Implement the callback from the GdbStubMixin parent...
fsbase = self.METHOD_NAME('fs')
fs_fields = self.readMemoryFormat(fsbase, '<8I')
# Windows has a self reference in the KPCR...
if fs_fields[7] != fsbase:
logger.warning(str([ hex(x) for x in fs_fields ]))
raise Exception('poi(fsbase+(ptr*7)) != fsbase! ( not actually windows? )')
vt_winkern.initWinkernTrace(self, fsbase)
return
def normFileName(self, libname):
basename = libname.split('\\')[-1]
return basename.split(".")[0].split("-")[0].lower()
def platformParseBinary(self, filename, baseaddr, normname):
try:
pe = PE.peFromMemoryObject(self, baseaddr)
vhash = e_symcache.symCacheHashFromPe(pe)
symcache = self.symcache.getCacheSyms(vhash)
if symcache is None:
# Symbol type 0 for now...
symcache = [(rva, 0, name, e_resolv.SYMSTOR_SYM_SYMBOL) for rva, ord, name in pe.getExports()]
self.symcache.setCacheSyms(vhash, symcache)
self.impSymCache(symcache, symfname=normname, baseaddr=baseaddr)
except Exception as e:
logger.error('Error Parsing Binary (%s): %s', normname, e)
def buildNewTrace(self):
return VMWare32WindowsTrace(host=self._gdb_host, port=self._gdb_port)
# FIXME move these to gdbstub
def isValidPointer(self, addr):
# Fake this out by attempting to read... ( slow/lame )
cmd = 'm%x,%x' % (addr, 1)
pkt = self._cmdTransact(cmd)
return not pkt.startswith('E')
def archActivBreakpoint(self, addr):
self._gdbAddMemBreak(addr, 1)
def archClearBreakpoint(self, addr):
self._gdbDelMemBreak(addr, 1)
|
299,806 | calc coverage | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
import inspect
import logging
import os
import sys
import traceback
import datetime as dt
from azure.core.exceptions import AzureError
from azure.cli.testsdk.exceptions import (
CliTestError,
CliExecutionError,
JMESPathCheckAssertionError,
)
logger = logging.getLogger("azure.cli.testsdk")
logger.addHandler(logging.StreamHandler())
__path__ = __import__("pkgutil").extend_path(__path__, __name__)
exceptions = []
test_map = dict()
SUCCESSED = "successed"
FAILED = "failed"
def try_manual(func):
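    # Resolve a hand-written override for `func` from the sibling `..manual`
    # package by mirroring the module path of the decorated function.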
def import_manual_function(origin_func):
from importlib import import_module
decorated_path = inspect.getfile(origin_func).lower()
module_path = __path__[0].lower()
if not decorated_path.startswith(module_path):
raise Exception("Decorator can only be used in submodules!")
manual_path = os.path.join(decorated_path[module_path.rfind(os.path.sep) + 1 :])
manual_file_path, manual_file_name = os.path.split(manual_path)
module_name, _ = os.path.splitext(manual_file_name)
manual_module = "..manual." + ".".join(
manual_file_path.split(os.path.sep)
+ [
module_name,
]
)
return getattr(
import_module(manual_module, package=__name__), origin_func.__name__
)
def get_func_to_call():
func_to_call = func
try:
func_to_call = import_manual_function(func)
logger.info("Found manual override for %s(...)", func.__name__)
except (ImportError, AttributeError):
pass
return func_to_call
def wrapper(*args, **kwargs):
func_to_call = get_func_to_call()
logger.info("running %s()...", func.__name__)
try:
test_map[func.__name__] = dict()
test_map[func.__name__]["result"] = SUCCESSED
test_map[func.__name__]["error_message"] = ""
test_map[func.__name__]["error_stack"] = ""
test_map[func.__name__]["error_normalized"] = ""
test_map[func.__name__]["start_dt"] = dt.datetime.utcnow()
ret = func_to_call(*args, **kwargs)
except (
AssertionError,
AzureError,
CliTestError,
CliExecutionError,
SystemExit,
JMESPathCheckAssertionError,
) as e:
use_exception_cache = os.getenv("TEST_EXCEPTION_CACHE")
if use_exception_cache is None or use_exception_cache.lower() != "true":
raise
test_map[func.__name__]["end_dt"] = dt.datetime.utcnow()
test_map[func.__name__]["result"] = FAILED
test_map[func.__name__]["error_message"] = (
str(e).replace("\r\n", " ").replace("\n", " ")[:500]
)
test_map[func.__name__]["error_stack"] = (
traceback.format_exc().replace("\r\n", " ").replace("\n", " ")[:500]
)
logger.info("--------------------------------------")
logger.info("step exception: %s", e)
logger.error("--------------------------------------")
logger.error("step exception in %s: %s", func.__name__, e)
logger.info(traceback.format_exc())
exceptions.append((func.__name__, sys.exc_info()))
else:
test_map[func.__name__]["end_dt"] = dt.datetime.utcnow()
return ret
if inspect.isclass(func):
return get_func_to_call()
return wrapper
def METHOD_NAME(filename):
filename = filename.split(".")[0]
coverage_name = filename + "_coverage.md"
with open(coverage_name, "w") as f:
f.write(
"|Scenario|Result|ErrorMessage|ErrorStack|ErrorNormalized|StartDt|EndDt|\n"
)
total = len(test_map)
covered = 0
for k, v in test_map.items():
if not k.startswith("step_"):
total -= 1
continue
if v["result"] == SUCCESSED:
covered += 1
f.write(
"|{step_name}|{result}|{error_message}|{error_stack}|{error_normalized}|{start_dt}|"
"{end_dt}|\n".format(step_name=k, **v)
)
f.write("Coverage: {}/{}\n".format(covered, total))
print("Create coverage\n", file=sys.stderr)
def raise_if():
if exceptions:
if len(exceptions) <= 1:
raise exceptions[0][1][1]
message = "{}\nFollowed with exceptions in other steps:\n".format(
str(exceptions[0][1][1])
)
message += "\n".join(["{}: {}".format(h[0], h[1][1]) for h in exceptions[1:]])
raise exceptions[0][1][0](message).with_traceback(exceptions[0][1][2]) |
299,807 | histplot bokeh op | """Bokeh Distplot."""
import matplotlib.pyplot as plt
import numpy as np
from ....stats.density_utils import get_bins, histogram
from ...kdeplot import plot_kde
from ...plot_utils import (
_scale_fig_size,
set_bokeh_circular_ticks_labels,
vectorized_to_hex,
_init_kwargs_dict,
)
from .. import show_layout
from . import backend_kwarg_defaults, create_axes_grid
def plot_dist(
values,
values2,
color,
kind,
cumulative,
label,
rotated,
rug,
bw,
quantiles,
contour,
fill_last,
figsize,
textsize,
plot_kwargs,
fill_kwargs,
rug_kwargs,
contour_kwargs,
contourf_kwargs,
pcolormesh_kwargs,
hist_kwargs,
is_circular,
ax,
backend_kwargs,
show,
):
"""Bokeh distplot."""
backend_kwargs = _init_kwargs_dict(backend_kwargs)
backend_kwargs = {
**backend_kwarg_defaults(),
**backend_kwargs,
}
figsize, *_ = _scale_fig_size(figsize, textsize)
color = vectorized_to_hex(color)
hist_kwargs = _init_kwargs_dict(hist_kwargs)
if kind == "hist":
hist_kwargs.setdefault("cumulative", cumulative)
hist_kwargs.setdefault("fill_color", color)
hist_kwargs.setdefault("line_color", color)
hist_kwargs.setdefault("line_alpha", 0)
if label is not None:
hist_kwargs.setdefault("legend_label", str(label))
if ax is None:
ax = create_axes_grid(
1,
figsize=figsize,
squeeze=True,
polar=is_circular,
backend_kwargs=backend_kwargs,
)
if kind == "auto":
kind = "hist" if values.dtype.kind == "i" else "kde"
if kind == "hist":
METHOD_NAME(
values=values,
values2=values2,
rotated=rotated,
ax=ax,
hist_kwargs=hist_kwargs,
is_circular=is_circular,
)
elif kind == "kde":
plot_kwargs = _init_kwargs_dict(plot_kwargs)
if color is None:
color = plt.rcParams["axes.prop_cycle"].by_key()["color"][0]
plot_kwargs.setdefault("line_color", color)
legend = label is not None
plot_kde(
values,
values2,
cumulative=cumulative,
rug=rug,
label=label,
bw=bw,
is_circular=is_circular,
quantiles=quantiles,
rotated=rotated,
contour=contour,
legend=legend,
fill_last=fill_last,
plot_kwargs=plot_kwargs,
fill_kwargs=fill_kwargs,
rug_kwargs=rug_kwargs,
contour_kwargs=contour_kwargs,
contourf_kwargs=contourf_kwargs,
pcolormesh_kwargs=pcolormesh_kwargs,
ax=ax,
backend="bokeh",
backend_kwargs={},
show=False,
)
else:
raise TypeError(f'Invalid "kind":{kind}. Select from {{"auto","kde","hist"}}')
show_layout(ax, show)
return ax
def METHOD_NAME(values, values2, rotated, ax, hist_kwargs, is_circular):
"""Add a histogram for the data to the axes."""
if values2 is not None:
raise NotImplementedError("Insert hexbin plot here")
color = hist_kwargs.pop("color", False)
if color:
hist_kwargs["fill_color"] = color
hist_kwargs["line_color"] = color
bins = hist_kwargs.pop("bins", None)
if bins is None:
bins = get_bins(values)
hist, hist_dens, edges = histogram(np.asarray(values).flatten(), bins=bins)
if hist_kwargs.pop("density", True):
hist = hist_dens
if hist_kwargs.pop("cumulative", False):
hist = np.cumsum(hist)
hist /= hist[-1]
if values.dtype.kind == "i":
edges = edges.astype(float) - 0.5
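    # circular histograms are drawn as annular wedges with angular tick labels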
if is_circular:
if is_circular == "degrees":
edges = np.deg2rad(edges)
labels = ["0°", "45°", "90°", "135°", "180°", "225°", "270°", "315°"]
else:
labels = [
r"0",
r"π/4",
r"π/2",
r"3π/4",
r"π",
r"5π/4",
r"3π/2",
r"7π/4",
]
delta = np.mean(np.diff(edges) / 2)
ax.annular_wedge(
x=0,
y=0,
inner_radius=0,
outer_radius=hist,
start_angle=edges[1:] - delta,
end_angle=edges[:-1] - delta,
direction="clock",
**hist_kwargs,
)
ax = set_bokeh_circular_ticks_labels(ax, hist, labels)
elif rotated:
ax.quad(top=edges[:-1], bottom=edges[1:], left=0, right=hist, **hist_kwargs)
else:
ax.quad(top=hist, bottom=0, left=edges[:-1], right=edges[1:], **hist_kwargs)
return ax |
299,808 | make any methods | # Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
import os, re, string
src_dir = r"c:\Documents and Settings\Jim\My Documents\Visual Studio Projects\IronPython"
meth_pat = r"(?P<ret_type>\w+)\s+(?P<name>\w+)\s*\((?P<params>(?:\w|\s|,)*)\)"
meth_pat = re.compile(r"public\s+(?P<mods>static\s+)?(virtual\s+)?"+meth_pat)
class_pat = re.compile(r"public\s+(abstract\s+)?class\s+(?P<name>\w+)\s*(:\s*(?P<super_name>\w+))?")
START = "//BEGIN-GENERATED"
END = "//END-GENERATED"
generated_pat = re.compile(START+".*"+END, re.DOTALL)
from_table = {
'int':"Py.asInt(%s)",
'double':"Py.asDouble(%s)",
'char':"Py.asChar(%s)",
'PyObject':"%s",
}
def from_any(totype, name):
pat = from_table.get(totype, None)
if pat is None:
return "(%s)%s" % (totype, name)
else:
return pat % name
to_table = {
'int':"PyInteger.make(%s)",
'double':"PyFloat.make(%s)",
'bool':"PyBoolean.make(%s)",
'char':"PyString.make(%s)",
'PyObject':"%s",
}
def to_any(totype, name):
pat = to_table.get(totype, None)
if pat is None:
return name #"(%s)%s" % (totype, name)
else:
return pat % name
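# C# templates used to emit __op__/__rop__ (and unary __op__) wrappers: unbox the
# operand via Py.check<Type> in the binary forms, call the static helper, and box
# the result with <RetType>.make.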
BINOP = """
public override PyObject __%(name)s__(PyObject other) {
%(type)s ov;
if (!Py.check%(ctype)s(other, out ov)) return Py.NotImplemented;
return %(ret_type)s.make(%(name)s(_value, ov));
}
public override PyObject __r%(name)s__(PyObject other) {
%(type)s ov;
if (!Py.check%(ctype)s(other, out ov)) return Py.NotImplemented;
return %(ret_type)s.make(%(name)s(ov, _value));
}
"""
UNOP = """
public override PyObject __%(name)s__() {
return %(ret_type)s.make(%(name)s(_value));
}
"""
class Method:
def __init__(self, ret_type, name, params, mods=None):
self.param_list = map(lambda s: s.strip(), string.split(params, ","))
self.param_list = filter(lambda s: s, self.param_list)
self.ret_type = ret_type
self.name = name
self.mods = mods
self.is_invokable = True
def get_public_name(self):
name = self.name
if name.endswith("_") and not name.endswith("__"):
name = name[:-1]
return name
def is_any(self):
if self.ret_type != ANY or self.mods: return False
for p in self.param_list:
if not p.startswith(ANY): return False
return True
def is_static(self):
return self.mods is not None
def is_internal(self):
return self.name.endswith("__")
def make_op(self):
self.is_invokable = False
type = self.param_list[0].split(" ")[0]
dict = {
'name':self.get_public_name(),
'type':type,
'ctype':type.capitalize(),
'ret_type':to_table[self.ret_type].split('.')[0]
}
if len(self.param_list) == 2:
return (BINOP % dict).split("\n")
else:
return (UNOP % dict).split("\n")
def make_any(self, in_module=False):
if not in_module and self.is_static():
return self.make_op()
name = self.get_public_name()
params = []
args = []
for p in self.param_list:
ptype, pname = string.split(p)
args.append(from_any(ptype, pname))
params.append("PyObject %s" % pname)
if self.is_internal():
mods = "override "
self.is_invokable = False
else:
mods = ""
if self.is_static():
mods += "static "
ret = ["public %sPyObject %s(%s) {" % (mods, name, string.join(params, ", "))]
if self.ret_type == 'void':
ret.append(" %s(%s);" % (self.name, string.join(args, ", ")))
ret.append(" return Py.None;")
else:
value = "%s(%s)" % (self.name, string.join(args, ", "))
ret.append(" return %s;" % to_any(self.ret_type, value))
ret.append("}")
return ret
def __repr__(self):
return "Method(%s, %s, %s)" % (self.ret_type, self.name, self.param_list)
class Class:
def __init__(self, name, super_name, methods):
self.name = name
self.super_name = super_name
self.methods = methods
self.strings = {}
def get_constant_string(self, s):
if not self.strings.has_key(s):
self.strings[s] = s + "_str"
return self.strings[s]
def make_invoke_method(self, nargs):
params = ["PyString name"]
args = ["name"]
for i in range(nargs):
params.append("PyObject arg%d" % i)
args.append("arg%d" % i)
ret = ["public override PyObject invoke(%s) {" % ", ".join(params)]
ret.append(" if (name.interned) {")
#TODO create switch clause when more than 3-8 matches
for method in self.methods:
if not method.is_invokable or len(method.param_list) != nargs:
continue
name = method.get_public_name()
ret.append(" if (name == %s) return %s(%s);" %
(self.get_constant_string(name), name,
", ".join(args[1:])))
if len(ret) == 2: return []
ret.append(" }")
ret.append(" return base.invoke(%s);" % ", ".join(args))
ret.append("}")
return ret
def METHOD_NAME(self):
ret = []
# first generate the any forms of each method
for method in self.methods:
if not method.is_any():
ret.extend(method.make_any(self.super_name == 'PyModule'))
# now generate the various invoke methods
for i in range(4):
ret.extend(self.make_invoke_method(i))
#TODO invokeN
for value, name in self.strings.items():
ret.append('static readonly PyString %s = PyString.intern("%s");' %
(name, value))
return ret
def __repr__(self):
return "Class(%s, %s)" % (self.name, self.super_name)
def collect_methods(text):
text = generated_pat.sub("", text)
ret = []
match= class_pat.search(text)
#print match
if match is None:
return None
cl = Class(match.group('name'), match.group('super_name'), ret)
for match in meth_pat.finditer(text):
meth = Method(**match.groupdict())
if meth.is_static() and meth.name in ['make', 'intern']: continue
ret.append(meth)
return cl
base = collect_methods(open(os.path.join(src_dir, "PyObject.cs")).read())
ANY = base.name
for file in os.listdir(src_dir):
filename = os.path.join(src_dir, file)
if not filename.endswith(".cs"): continue
text = open(filename).read()
if generated_pat.search(text) is None: continue
c = collect_methods(text)
assert c is not None and c.name != base.name
#if c.super_name != 'PyModule':
# assert c.super_name == base.name, c.super_name
print c, c.methods
code = c.METHOD_NAME()
code.insert(0, START)
code.append(END)
generated_code = "\n\t\t".join(code)
new_text = generated_pat.sub(generated_code, text)
#print new_text
if text != new_text:
open(filename, 'w').write(new_text)
#print c, c.methods
#print meth_pat.search("public void m(int a, float d)").groups()
|
299,809 | function by name | import subprocess
import sys
import re
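# Patterns for parsing `arm-none-eabi-objdump -d` output: function headers
# ("<addr> <name>:") and instruction lines ("<addr>: <bytes>\t<op>").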
function_intro_re = re.compile(r'^(?P<addr>[0-9a-fA-F]{8}) <(?P<name>[a-zA-Z0-9\._]+)>:$')
insn_re = re.compile(r'^\s+(?P<addr>[0-9a-fA-F]+):\s+(?P<insn>[0-9a-fA-F ]+)\s+\t(?P<op>.*)$')
class Instruction:
def __init__(self, addr, insn, op):
self.addr = long(addr, 16)
self.insn = insn
args = op.split('\t', 1)
self.op = args[0].strip()
if len(args) == 2:
comment = args[1].strip().split(';', 1)
else:
comment = args
self.args = comment[0].strip()
if len(comment) == 2:
self.comment = comment[1].strip()
else:
self.comment = ''
def __repr__(self):
return '<insn %r>' % (self.__dict__)
def literal_branch_target(t):
return ' <' in t
class Function:
def __init__(self, addr, name):
self.name = name
self.addr = long(addr, 16)
self.insns = []
self.calls = []
def __repr__(self):
return '<%s %d instructions>' % (self.name, len(self.insns))
def add_insn(self, insn):
self.insns.append(Instruction(**insn))
def contains_addr(self, addr):
if self.insns:
return addr >= self.addr and addr <= self.insns[-1].addr
else:
return addr == self.addr
def dump(self):
print self.name + ':'
for insn in self.insns:
print ' ', '%04x' % insn.addr + ':', insn.op, insn.args, '\t;', insn.comment
def get_literal_word(self, addr):
for insn in self.insns:
if insn.addr == addr and insn.op == '.word':
w = int(insn.args, 16)
if w & 0x80000000:
w = -(w ^ 0xffffffff) + 1
return w
return None
def analyse(self, prog):
self.stack_guess = None
regs = {}
for insn in self.insns:
# stack adjustment with literal
if insn.op == 'sub' and insn.args.startswith('sp, ') and self.stack_guess is None:
sz = int(insn.args.split('#', 1)[1])
self.stack_guess = sz
# literal pool loads
if insn.op == 'ldr' and ', [pc, #' in insn.args:
reg, offset = insn.args.split(', [pc, #')
offset = int(offset.replace(']', ''))
word = self.get_literal_word(insn.addr + offset + 2)
if word is not None:
regs[reg] = word
if insn.op == 'add' and insn.args.startswith('sp, r') and self.stack_guess is None:
reg = insn.args.split(', ')[1]
if reg in regs:
self.stack_guess = regs[reg]
# static branches
if insn.op[0] == 'b' and literal_branch_target(insn.args):
target = long(insn.args.split(' <', 1)[0], 16)
targetf = prog.function_at_addr(target)
if targetf and targetf != self:
self.calls.append(targetf)
if self.stack_guess is None:
self.stack_guess = 0
def stack_usage(self, hints, warns, prog, depth = 0):
hinted_calls = []
if self.stack_guess:
print ' ' * depth, 'stack:', self.name, self.stack_guess, 'bytes'
our_hints = [h for h in hints if h and h[0] == self.name]
if our_hints:
hints = [h[1:] for h in our_hints]
hinted_calls = [prog.METHOD_NAME(h[0]) for h in hints if h]
else:
if self.name in warns:
                print ' WARN: no call hints for fn-ptr caller', self.name
if self.calls + hinted_calls:
call_usage = max([f.stack_usage(hints, warns, prog, depth + 1) for f in self.calls + hinted_calls])
else:
call_usage = 0
return self.stack_guess + call_usage
class Program:
def __init__(self):
self.functions = []
# sequence of tuples naming a call sequence known to occur
# this allows working out calls through pointers
self.call_hints = []
# function names to warn on if we don't have callees
self.call_warns = set()
def read_elf(self, elf):
current_fn = None
for x in subprocess.Popen(['arm-none-eabi-objdump', '-d', elf],
stdout = subprocess.PIPE).stdout:
x = x.rstrip('\n')
m = function_intro_re.match(x)
if m:
fn = Function(**m.groupdict())
current_fn = fn
self.functions.append(fn)
m = insn_re.match(x)
if m:
assert current_fn
current_fn.add_insn(m.groupdict())
def analyse(self):
for f in self.functions:
f.analyse(self)
def METHOD_NAME(self, name):
fns = [fn for fn in self.functions if fn.name == name]
if len(fns) == 0:
return None
elif len(fns) == 1:
return fns[0]
else:
print 'warn: more than one function named', name
return None
def function_at_addr(self, addr):
for f in self.functions:
if f.addr == addr:
return f
return None
def add_call_hint(self, *seq):
self.call_hints.append(seq)
def add_call_warn(self, fn):
self.call_warns.add(fn)
def measure_stack(self, name):
fn = self.METHOD_NAME(name)
if fn is None:
return 0
return fn.stack_usage(self.call_hints, self.call_warns, self)
_, exe, fn = sys.argv
p = Program()
p.read_elf(exe)
p.analyse()
# calls which indirect through fn ptrs
p.add_call_warn('cf_blockwise_accumulate')
p.add_call_warn('cf_blockwise_accumulate_final')
# hints to resolve those
p.add_call_hint('cf_sha224_update', 'cf_blockwise_accumulate', 'cf_blockwise_accumulate_final', 'sha256_update_block')
p.add_call_hint('cf_sha256_update', 'cf_blockwise_accumulate', 'cf_blockwise_accumulate_final', 'sha256_update_block')
p.add_call_hint('cf_sha384_update', 'cf_blockwise_accumulate', 'cf_blockwise_accumulate_final', 'sha512_update_block')
p.add_call_hint('cf_sha512_update', 'cf_blockwise_accumulate', 'cf_blockwise_accumulate_final', 'sha512_update_block')
p.add_call_hint('cf_norx32_encrypt', 'input', 'cf_blockwise_accumulate', 'cf_blockwise_accumulate_final', 'input_block')
p.add_call_hint('cf_norx32_decrypt', 'input', 'cf_blockwise_accumulate', 'cf_blockwise_accumulate_final', 'input_block')
p.add_call_hint('cf_cbcmac_stream_update', 'cf_blockwise_accumulate', 'cf_blockwise_accumulate_final', 'cbcmac_process')
p.add_call_hint('cf_cmac_stream_update', 'cf_blockwise_accumulate', 'cf_blockwise_accumulate_final', 'cmac_process_final_pad')
p.add_call_hint('cf_cmac_stream_update', 'cf_blockwise_accumulate_final', 'cmac_process')
p.add_call_hint('cf_cmac_stream_update', 'cf_blockwise_accumulate_final', 'cmac_process_final_nopad')
print 'stack', fn, '=', p.measure_stack(fn) |
299,810 | test context with path in cwd | from os import getcwd, path
from unittest.mock import sentinel
from sceptre.context import SceptreContext
class TestSceptreContext(object):
def setup_method(self, test_method):
self.templates_path = "templates"
self.config_path = "config"
self.config_file = "config.yaml"
def METHOD_NAME(self):
self.context = SceptreContext(
project_path="project_path/to/sceptre",
command_path="command-path",
command_params=sentinel.command_params,
user_variables=sentinel.user_variables,
options=sentinel.options,
output_format=sentinel.output_format,
no_colour=sentinel.no_colour,
ignore_dependencies=sentinel.ignore_dependencies,
)
sentinel.project_path = f"{getcwd()}/project_path/to/sceptre"
assert self.context.project_path.replace(path.sep, "/") == sentinel.project_path
def test_context_with_relative_path(self):
self.context = SceptreContext(
project_path="./project_path/to/sceptre",
command_path="command-path",
command_params=sentinel.command_params,
user_variables=sentinel.user_variables,
options=sentinel.options,
output_format=sentinel.output_format,
no_colour=sentinel.no_colour,
ignore_dependencies=sentinel.ignore_dependencies,
)
expected = f"{getcwd()}/project_path/to/sceptre"
assert self.context.project_path.replace(path.sep, "/") == expected
def test_context_with_absolute_path(self):
self.context = SceptreContext(
project_path=f"{getcwd()}/project_path/to/sceptre",
command_path="command-path",
command_params=sentinel.command_params,
user_variables=sentinel.user_variables,
options=sentinel.options,
output_format=sentinel.output_format,
no_colour=sentinel.no_colour,
ignore_dependencies=sentinel.ignore_dependencies,
)
expected = f"{getcwd()}/project_path/to/sceptre"
assert self.context.project_path.replace(path.sep, "/") == expected
def test_full_config_path_returns_correct_path(self):
context = SceptreContext(
project_path="project_path",
command_path="command-path",
command_params=sentinel.command_params,
user_variables=sentinel.user_variables,
options=sentinel.options,
output_format=sentinel.output_format,
no_colour=sentinel.no_colour,
ignore_dependencies=sentinel.ignore_dependencies,
)
full_config_path = path.join(f"{getcwd()}/project_path", self.config_path)
assert context.full_config_path() == full_config_path
def test_full_command_path_returns_correct_path(self):
context = SceptreContext(
project_path="project_path",
command_path="command",
command_params=sentinel.command_params,
user_variables=sentinel.user_variables,
options=sentinel.options,
output_format=sentinel.output_format,
no_colour=sentinel.no_colour,
ignore_dependencies=sentinel.ignore_dependencies,
)
full_command_path = path.join(
f"{getcwd()}/project_path", self.config_path, "command"
)
assert context.full_command_path() == full_command_path
def test_full_templates_path_returns_correct_path(self):
context = SceptreContext(
project_path="project_path",
command_path="command",
command_params=sentinel.command_params,
user_variables=sentinel.user_variables,
options=sentinel.options,
output_format=sentinel.output_format,
no_colour=sentinel.no_colour,
ignore_dependencies=sentinel.ignore_dependencies,
)
full_templates_path = path.join(f"{getcwd()}/project_path", self.templates_path)
assert context.full_templates_path() == full_templates_path
def test_clone__returns_full_clone_of_context(self):
context = SceptreContext(
project_path="project_path",
command_path="command",
command_params={"params": "variables"},
user_variables={"user": "variables"},
options={"hello": "there"},
output_format=sentinel.output_format,
no_colour=sentinel.no_colour,
ignore_dependencies=sentinel.ignore_dependencies,
)
clone = context.clone()
assert clone is not context
assert clone.project_path == context.project_path
assert clone.command_path == context.command_path
assert clone.user_variables == context.user_variables
assert clone.user_variables is not context.user_variables
assert clone.options == context.options
assert clone.options is not context.options
assert clone.output_format == context.output_format
assert clone.no_colour == context.no_colour
assert clone.ignore_dependencies == context.ignore_dependencies |
299,811 | test main | import os
import unittest
import random
from test import test_support
thread = test_support.import_module('thread')
import time
import sys
import weakref
from test import lock_tests
NUMTASKS = 10
NUMTRIPS = 3
_print_mutex = thread.allocate_lock()
def verbose_print(arg):
"""Helper function for printing out debugging output."""
if test_support.verbose:
with _print_mutex:
print arg
class BasicThreadTest(unittest.TestCase):
def setUp(self):
self.done_mutex = thread.allocate_lock()
self.done_mutex.acquire()
self.running_mutex = thread.allocate_lock()
self.random_mutex = thread.allocate_lock()
self.created = 0
self.running = 0
self.next_ident = 0
class ThreadRunningTests(BasicThreadTest):
def newtask(self):
with self.running_mutex:
self.next_ident += 1
verbose_print("creating task %s" % self.next_ident)
thread.start_new_thread(self.task, (self.next_ident,))
self.created += 1
self.running += 1
def task(self, ident):
with self.random_mutex:
delay = random.random() / 10000.0
verbose_print("task %s will run for %sus" % (ident, round(delay*1e6)))
time.sleep(delay)
verbose_print("task %s done" % ident)
with self.running_mutex:
self.running -= 1
if self.created == NUMTASKS and self.running == 0:
self.done_mutex.release()
def test_starting_threads(self):
# Basic test for thread creation.
for i in range(NUMTASKS):
self.newtask()
verbose_print("waiting for tasks to complete...")
self.done_mutex.acquire()
verbose_print("all tasks done")
def test_stack_size(self):
# Various stack size tests.
self.assertEqual(thread.stack_size(), 0, "initial stack size is not 0")
thread.stack_size(0)
self.assertEqual(thread.stack_size(), 0, "stack_size not reset to default")
if os.name not in ("nt", "os2", "posix"):
return
tss_supported = True
try:
thread.stack_size(4096)
except ValueError:
verbose_print("caught expected ValueError setting "
"stack_size(4096)")
except thread.error:
tss_supported = False
verbose_print("platform does not support changing thread stack "
"size")
if tss_supported:
fail_msg = "stack_size(%d) failed - should succeed"
for tss in (262144, 0x100000, 0):
thread.stack_size(tss)
self.assertEqual(thread.stack_size(), tss, fail_msg % tss)
verbose_print("successfully set stack_size(%d)" % tss)
for tss in (262144, 0x100000):
verbose_print("trying stack_size = (%d)" % tss)
self.next_ident = 0
self.created = 0
for i in range(NUMTASKS):
self.newtask()
verbose_print("waiting for all tasks to complete")
self.done_mutex.acquire()
verbose_print("all tasks done")
thread.stack_size(0)
@unittest.skipIf(test_support.is_jython,
"This seems cpython internal, so skip for Jython")
def test__count(self):
# Test the _count() function.
orig = thread._count()
mut = thread.allocate_lock()
mut.acquire()
started = []
def task():
started.append(None)
mut.acquire()
mut.release()
thread.start_new_thread(task, ())
while not started:
time.sleep(0.01)
self.assertEqual(thread._count(), orig + 1)
# Allow the task to finish.
mut.release()
# The only reliable way to be sure that the thread ended from the
# interpreter's point of view is to wait for the function object to be
# destroyed.
done = []
wr = weakref.ref(task, lambda _: done.append(None))
del task
while not done:
time.sleep(0.01)
self.assertEqual(thread._count(), orig)
class Barrier:
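    # A reusable two-lock barrier: threads check in under checkin_mutex and are
    # released one at a time via checkout_mutex once all have arrived.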
def __init__(self, num_threads):
self.num_threads = num_threads
self.waiting = 0
self.checkin_mutex = thread.allocate_lock()
self.checkout_mutex = thread.allocate_lock()
self.checkout_mutex.acquire()
def enter(self):
self.checkin_mutex.acquire()
self.waiting = self.waiting + 1
if self.waiting == self.num_threads:
self.waiting = self.num_threads - 1
self.checkout_mutex.release()
return
self.checkin_mutex.release()
self.checkout_mutex.acquire()
self.waiting = self.waiting - 1
if self.waiting == 0:
self.checkin_mutex.release()
return
self.checkout_mutex.release()
class BarrierTest(BasicThreadTest):
def test_barrier(self):
self.bar = Barrier(NUMTASKS)
self.running = NUMTASKS
for i in range(NUMTASKS):
thread.start_new_thread(self.task2, (i,))
verbose_print("waiting for tasks to end")
self.done_mutex.acquire()
verbose_print("tasks done")
def task2(self, ident):
for i in range(NUMTRIPS):
if ident == 0:
# give it a good chance to enter the next
# barrier before the others are all out
# of the current one
delay = 0
else:
with self.random_mutex:
delay = random.random() / 10000.0
verbose_print("task %s will run for %sus" %
(ident, round(delay * 1e6)))
time.sleep(delay)
verbose_print("task %s entering %s" % (ident, i))
self.bar.enter()
verbose_print("task %s leaving barrier" % ident)
with self.running_mutex:
self.running -= 1
# Must release mutex before releasing done, else the main thread can
# exit and set mutex to None as part of global teardown; then
# mutex.release() raises AttributeError.
finished = self.running == 0
if finished:
self.done_mutex.release()
class LockTests(lock_tests.LockTests):
locktype = thread.allocate_lock
class TestForkInThread(unittest.TestCase):
def setUp(self):
self.read_fd, self.write_fd = os.pipe()
@unittest.skipIf(test_support.is_jython, "Jython does not support os.fork")
@unittest.skipIf(sys.platform.startswith('win'),
"This test is only appropriate for POSIX-like systems.")
@test_support.reap_threads
def test_forkinthread(self):
def thread1():
try:
pid = os.fork() # fork in a thread
except RuntimeError:
sys.exit(0) # exit the child
if pid == 0: # child
os.close(self.read_fd)
os.write(self.write_fd, "OK")
sys.exit(0)
else: # parent
os.close(self.write_fd)
thread.start_new_thread(thread1, ())
self.assertEqual(os.read(self.read_fd, 2), "OK",
"Unable to fork() in thread")
def tearDown(self):
try:
os.close(self.read_fd)
except OSError:
pass
try:
os.close(self.write_fd)
except OSError:
pass
def METHOD_NAME():
test_support.run_unittest(ThreadRunningTests, BarrierTest, LockTests,
TestForkInThread)
if __name__ == "__main__":
METHOD_NAME() |
299,812 | disable | #!/usr/bin/env python3
import abc
from typing import List, Optional
import numpy as np
import rospy
from image_geometry import PinholeCameraModel
from mil_msgs.msg import ObjectInImage, ObjectsInImage
from mil_ros_tools import Image_Subscriber, numpy_to_point2d
from std_srvs.srv import SetBool
__author__ = "Kevin Allen"
def create_object_msg(
name: str,
confidence: Optional[float] = None,
center: Optional[np.ndarray] = None,
contour: Optional[np.ndarray] = None,
rect: Optional[np.ndarray] = None,
attributes: str = "",
):
"""
Helper function to create a ``mil_msgs/ObjectInImage`` message.
Only one of center, contour, or rect should be set, depending on what information
is needed/available in your application.
Args:
        name (str): Name of the identified object.
attributes (str): Attributes to attach to message, the purpose and value
of this attribute will vary by application. Defaults to an empty string.
confidence (Optional[float]): Float between 0 and 1 describing the confidence
that name is correct for this object. Leave as ``None`` if confidence
is not known (will be set to -1).
center (Optional[np.ndarray]): ``[x, y]`` of the center point of the object.
contour (Optional[np.ndarray]): Nx1x2 or Nx2 numpy array of pixels making up the contour
around the object.
        rect (Optional[np.ndarray]): A 4-element tuple/array-like representing the
bounding box around the object as ``(X, Y, width, height)``,
which is the representation returned by cv2.boundingRect.
Returns:
ObjectInImage: Message object filled as described above.
"""
# Create message
msg = ObjectInImage()
# Fill name and attributes from argument
msg.name = name
msg.attributes = attributes
# Fill confidence from argument if given, otherwise use -1
if confidence is None:
msg.confidence = -1.0
else:
msg.confidence = confidence
# Fill points with contour, rect, or center depending on which is set
if contour is not None:
# Reshape to Nx2 in case input was given in cv's native Nx1x2 shape
if len(contour.shape) == 3:
contour = contour.reshape((contour.shape[0], contour.shape[2]))
for point in contour:
msg.points.append(numpy_to_point2d(point))
elif rect is not None:
# Add rectangle as upper left and bottom right points
ul = np.array(rect[0:2])
br = ul + np.array(rect[2:])
msg.points.append(numpy_to_point2d(ul))
msg.points.append(numpy_to_point2d(br))
elif center is not None:
# Add center tuple as single point
msg.points.append(numpy_to_point2d(center))
return msg
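# Illustrative calls (hypothetical names and values, assuming the usual numpy/cv2 inputs
# are in scope); exactly one of center, contour, or rect should be supplied per call:
#   create_object_msg("buoy", confidence=0.9, center=np.array([320, 240]))
#   create_object_msg("gate", rect=cv2.boundingRect(contour))
#   create_object_msg("path_marker", contour=contour, attributes="orange")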
class VisionNode(metaclass=abc.ABCMeta):
"""
    ABC class used to unify the interface for MIL's computer vision scripts.
    Handles the bootstrapping of image subscription, enable/disable, etc.
    Provides a callback for new images, which is expected to return the objects
    found in that image.
Attributes:
camera_model (Optional[:class:`PinholeCameraModel`]): Camera model used throughout the
class. Initially set to ``None``, but later set to an instance of the pinhole
camera model when enabled.
"""
def __init__(self):
self._objects_pub = rospy.Publisher(
"~identified_objects",
ObjectsInImage,
queue_size=3,
)
self._camera_info = None
self.camera_model = None
self._enabled = False
self._image_sub = Image_Subscriber("image", callback=self._img_cb)
if rospy.get_param("~autostart", default=False):
self._enable()
else:
self.METHOD_NAME()
self._enable_srv = rospy.Service("~enable", SetBool, self._enable_cb)
def _enable_cb(self, req):
if req.data and not self._enabled:
self._enable()
elif not req.data and self._enabled:
self.METHOD_NAME()
return {"success": True}
def _enable(self):
if self._camera_info is None:
self._camera_info = self._image_sub.wait_for_camera_info()
self.camera_model = PinholeCameraModel()
self.camera_model.fromCameraInfo(self._camera_info)
self._enabled = True
rospy.loginfo("Enabled.")
def METHOD_NAME(self):
self._enabled = False
rospy.loginfo("Disabled.")
def _img_cb(self, img):
if not self._enabled:
return
msg = ObjectsInImage()
msg.header = self._image_sub.last_image_header
msg.objects = self.find_objects(img)
if not isinstance(msg.objects, list) or (
len(msg.objects) and not isinstance(msg.objects[0], ObjectInImage)
):
rospy.logwarn(
"find_objects did not return a list of mil_msgs/ObjectInImage message. Ignoring.",
)
self._objects_pub.publish(msg)
@abc.abstractmethod
def find_objects(self, img: np.ndarray) -> List[ObjectInImage]:
"""
Given an image as a source, this abstract method should be overridden to return
a list of :class:`ObjectInImage`.
Args:
img (np.ndarray): The source image.
Returns:
List[ObjectInImage]: A list of the objects found in the image.
"""
if __name__ == "__main__":
"""
When this library is run as an executable, run a demo class.
"""
import cv2
from cv_tools import contour_centroid
class VisionNodeExample(VisionNode):
"""
Example implementation of a VisionNode, useful only for reference in real applications
"""
def __init__(self):
# Call base class's init. Important to do this if you override __init__ in child class.
super().__init__()
def find_objects(self, img):
# Get a list of contours in image
blurred = cv2.blur(img, (5, 5))
edges = cv2.Canny(blurred, 100, 200)
_, contours, _ = cv2.findContours(
edges,
cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE,
)
contours = np.array(contours)
objects = []
# Add each contour, randomly choosing center, contour, or rect to demonstrate all three
            # In a real application, only one of the three methods would be used, depending on the algorithm
# and what information is needed.
for idx, contour in enumerate(contours):
# Demonstration of adding an object where only the center point can be identified
if idx % 3 == 0:
try:
center = contour_centroid(contour)
except ZeroDivisionError:
continue
objects.append(
create_object_msg("contour", center=center, attributes="green"),
)
# Demonstration of adding an object where the entire contour outline can be identified
if idx % 3 == 1:
objects.append(
create_object_msg("contour", contour=contour, confidence=0.5),
)
# Demonstration of adding an object where a bounding rectangle can be identified
if idx % 3 == 2:
objects.append(
create_object_msg(
"contour",
rect=cv2.boundingRect(contour),
confidence=0.8,
),
)
# Log that an image has been received for debugging this demo
rospy.loginfo("Image")
return objects
rospy.init_node("vision_node_example")
node = VisionNodeExample()
rospy.spin() |
299,813 | hexdump | #!/usr/bin/env python3
#
# Copyright 2016 WebAssembly Community Group participants
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import contextlib
import os
import json
import shutil
import shlex
import signal
import subprocess
import sys
import tempfile
# Get signal names from numbers in Python
# http://stackoverflow.com/a/2549950
SIGNAMES = dict((k, v) for v, k in reversed(sorted(signal.__dict__.items()))
if v.startswith('SIG') and not v.startswith('SIG_'))
class Error(Exception):
pass
class Executable(object):
def __init__(self, exe, *after_args, **kwargs):
self.exe = exe
self.after_args = list(after_args)
self.basename = kwargs.get('basename',
os.path.basename(exe)).replace('.exe', '')
self.error_cmdline = kwargs.get('error_cmdline', True)
self.stdout_handle = self._ForwardHandle(kwargs.get('forward_stdout'))
self.stderr_handle = self._ForwardHandle(kwargs.get('forward_stderr'))
self.verbose = False
def _ForwardHandle(self, forward):
return None if forward else subprocess.PIPE
def _RunWithArgsInternal(self, *args, **kwargs):
cmd = [self.exe] + list(args) + self.after_args
cmd_str = shlex.join(cmd)
if self.verbose:
print(cmd_str)
if self.error_cmdline:
err_cmd_str = cmd_str
else:
err_cmd_str = self.basename
stdout = ''
stderr = ''
error = None
try:
process = subprocess.run(cmd, check=False, text=True,
stdout=self.stdout_handle,
stderr=self.stderr_handle, **kwargs)
stdout = process.stdout
stderr = process.stderr
if process.returncode < 0:
# Terminated by signal
signame = SIGNAMES.get(-process.returncode, '<unknown>')
error = Error('Signal raised running "%s": %s\n%s' % (err_cmd_str,
signame, stderr))
elif process.returncode > 0:
error = Error('Error running "%s" (%d):\n%s\n%s' % (err_cmd_str, process.returncode, stdout, stderr))
except OSError as e:
error = Error('Error running "%s": %s' % (err_cmd_str, str(e)))
return stdout, stderr, error
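    # _RunWithArgsInternal returns a (stdout, stderr, error) triple, where `error` is
    # either None or an Error describing a non-zero exit, a terminating signal, or an
    # OSError; the wrappers below either hand back the captured stdout or echo it
    # before raising that error.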
def RunWithArgsForStdout(self, *args, **kwargs):
stdout, stderr, error = self._RunWithArgsInternal(*args, **kwargs)
if error:
raise error
return stdout
def RunWithArgs(self, *args, **kwargs):
stdout, stderr, error = self._RunWithArgsInternal(*args, **kwargs)
if stdout:
sys.stdout.write(stdout)
if error:
raise error
def AppendArg(self, arg):
self.after_args.append(arg)
def AppendOptionalArgs(self, option_dict):
for option, value in option_dict.items():
if value:
if value is True:
self.AppendArg(option)
else:
self.AppendArg('%s=%s' % (option, value))
@contextlib.contextmanager
def TempDirectory(out_dir, prefix=None):
if out_dir:
out_dir_is_temp = False
if not os.path.exists(out_dir):
os.makedirs(out_dir)
else:
out_dir = tempfile.mkdtemp(prefix=prefix)
out_dir_is_temp = True
try:
yield out_dir
finally:
if out_dir_is_temp:
shutil.rmtree(out_dir)
def ChangeExt(path, new_ext):
return os.path.splitext(path)[0] + new_ext
def ChangeDir(path, new_dir):
return os.path.join(new_dir, os.path.basename(path))
def METHOD_NAME(data):
DUMP_OCTETS_PER_LINE = 16
DUMP_OCTETS_PER_GROUP = 2
p = 0
end = len(data)
lines = []
while p < end:
line_start = p
line_end = p + DUMP_OCTETS_PER_LINE
line = '%07x: ' % p
while p < line_end:
for i in range(DUMP_OCTETS_PER_GROUP):
if p < end:
line += '%02x' % data[p]
else:
                    line += '  '
p += 1
line += ' '
line += ' '
p = line_start
for i in range(DUMP_OCTETS_PER_LINE):
if p >= end:
break
x = data[p]
if x >= 32 and x < 0x7f:
line += '%c' % x
else:
line += '.'
p += 1
line += '\n'
lines.append(line)
return lines
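# Each returned line has the shape (illustrative, not an exact dump):
#   "<7-digit hex offset>: <space-separated 2-byte hex groups>  <ASCII view, '.' for non-printables>\n"
# so callers can simply join and print the list to get a classic hexdump view.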
def GetModuleFilenamesFromSpecJSON(json_filename):
with open(json_filename) as json_file:
json_data = json.load(json_file)
return [m['filename'] for m in json_data['commands'] if 'filename' in m] |
299,814 | get node fanout | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Controller Class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
class Controller(object):
"""Controller class."""
def __init__(self, item, cluster):
"""Controller class initializer.
Args:
item: The metagraph to place wrapped in a cluster.
cluster: A cluster of devices on which to place the item.
"""
self.item = item
self._node = {}
for node in item.metagraph.graph_def.node:
self._node[node.name] = node
self._fanout = defaultdict(lambda: [])
for node in item.metagraph.graph_def.node:
for fanin in self._get_node_fanin(node):
self._fanout[fanin.name].append(node)
important_op_names = item.IdentifyImportantOps(sort_topologically=True)
# List of important ops (these are the ops to place) sorted in topological
# order. The order of this collection is deterministic.
self.important_ops = []
for name in important_op_names:
self.important_ops.append(self._node[name])
self.node_properties = item.GetOpProperties()
self.cluster = cluster
self.devices = cluster.ListDevices()
self.colocation_constraints = item.GetColocationGroups()
self.placement_constraints = cluster.GetSupportedDevices(item)
for node_name, dev in self.placement_constraints.items():
if len(dev) == 1:
# Place the node on the supported device
node = self._node[node_name]
node.device = dev[0]
fanout = self.METHOD_NAME(node)
# Update the fanout of the fanin to bypass the node
for fanin in self._get_node_fanin(node):
fanout_of_fanin = self.METHOD_NAME(fanin)
fanout_of_fanin += fanout
fanout_of_fanin.remove(node)
# Remove node from the list of important ops since we don't need to
# place the node.
if node in self.important_ops:
self.important_ops.remove(node)
important_op_names.remove(node.name)
        # List of important op names, in non-deterministic order.
self.important_op_names = frozenset(important_op_names)
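    # Note on the constructor above: any op whose placement constraint allows only a
    # single supported device is assigned that device up front and spliced out of both
    # the fan-out bookkeeping and `important_ops`, so the placer only reasons about ops
    # that still have a real choice of device.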
@property
def input_graph_def(self):
return self.item.metagraph.graph_def
@property
def num_devices(self):
return len(self.devices)
def get_node_by_name(self, node_name):
return self._node[node_name]
def METHOD_NAME(self, node):
return self._fanout[node.name]
def get_placements(self, *args, **kwargs):
"""Returns: Two TF ops.
Args:
*args: "".
**kwargs: "".
Returns:
y_preds: tensor of size [batch_size, num_ops]
log_probs: python dict of at least two fields: "sample", "target" each
containing a tensor of size [batch_size], corresponding to the log_probs.
"""
raise NotImplementedError
def eval_placement(self, sess, *args, **kwargs):
"""At this time, this method evaluates ONLY ONE placement.
Args:
sess: a tf.compat.v1.Session() object used to retrieve cached assignment
info.
*args: "".
**kwargs: "".
Returns:
run_time: scalar
"""
raise NotImplementedError
def export_placement(self, metagraph):
"""Annotate the placement onto the specified metagraph.
Args:
metagraph: the metagraph to annotate with the placement.
"""
for node in metagraph.graph_def.node:
if node.name in self.important_op_names:
node.device = self.get_node_by_name(node.name).device
# Get the nodes in the immediate fanin of node.
# Beware: this doesn't take into account the nodes that may be skipped
# since placement constraints force their placement.
def _get_node_fanin(self, node):
input_ops = []
for fanin_name in node.input:
if fanin_name[0] == "^":
fanin_name = fanin_name[1:]
fanin_name = fanin_name.split(":")[0]
input_ops.append(self.get_node_by_name(fanin_name))
return input_ops |
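# Fan-in names in a GraphDef may be control dependencies ("^node") or tensor outputs
# ("node:1"); _get_node_fanin strips the leading "^" and the ":<output-index>" suffix
# so both forms resolve to the producing node's name.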
299,815 | id | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetQuicksightGroupResult',
'AwaitableGetQuicksightGroupResult',
'get_quicksight_group',
'get_quicksight_group_output',
]
@pulumi.output_type
class GetQuicksightGroupResult:
"""
A collection of values returned by getQuicksightGroup.
"""
def __init__(__self__, arn=None, aws_account_id=None, description=None, group_name=None, METHOD_NAME=None, namespace=None, principal_id=None):
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if aws_account_id and not isinstance(aws_account_id, str):
raise TypeError("Expected argument 'aws_account_id' to be a str")
pulumi.set(__self__, "aws_account_id", aws_account_id)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if group_name and not isinstance(group_name, str):
raise TypeError("Expected argument 'group_name' to be a str")
pulumi.set(__self__, "group_name", group_name)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", METHOD_NAME)
if namespace and not isinstance(namespace, str):
raise TypeError("Expected argument 'namespace' to be a str")
pulumi.set(__self__, "namespace", namespace)
if principal_id and not isinstance(principal_id, str):
raise TypeError("Expected argument 'principal_id' to be a str")
pulumi.set(__self__, "principal_id", principal_id)
@property
@pulumi.getter
def arn(self) -> str:
"""
The Amazon Resource Name (ARN) for the group.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="awsAccountId")
def aws_account_id(self) -> str:
return pulumi.get(self, "aws_account_id")
@property
@pulumi.getter
def description(self) -> str:
"""
The group description.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="groupName")
def group_name(self) -> str:
return pulumi.get(self, "group_name")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
The provider-assigned unique ID for this managed resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def namespace(self) -> Optional[str]:
return pulumi.get(self, "namespace")
@property
@pulumi.getter(name="principalId")
def principal_id(self) -> str:
"""
The principal ID of the group.
"""
return pulumi.get(self, "principal_id")
class AwaitableGetQuicksightGroupResult(GetQuicksightGroupResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetQuicksightGroupResult(
arn=self.arn,
aws_account_id=self.aws_account_id,
description=self.description,
group_name=self.group_name,
METHOD_NAME=self.METHOD_NAME,
namespace=self.namespace,
principal_id=self.principal_id)
def get_quicksight_group(aws_account_id: Optional[str] = None,
group_name: Optional[str] = None,
namespace: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetQuicksightGroupResult:
"""
This data source can be used to fetch information about a specific
QuickSight group. By using this data source, you can reference QuickSight group
properties without having to hard code ARNs or unique IDs as input.
## Example Usage
### Basic Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.quicksight.get_quicksight_group(group_name="example")
```
:param str aws_account_id: AWS account ID.
:param str group_name: The name of the group that you want to match.
The following arguments are optional:
:param str namespace: QuickSight namespace. Defaults to `default`.
"""
__args__ = dict()
__args__['awsAccountId'] = aws_account_id
__args__['groupName'] = group_name
__args__['namespace'] = namespace
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('aws:quicksight/getQuicksightGroup:getQuicksightGroup', __args__, opts=opts, typ=GetQuicksightGroupResult).value
return AwaitableGetQuicksightGroupResult(
arn=pulumi.get(__ret__, 'arn'),
aws_account_id=pulumi.get(__ret__, 'aws_account_id'),
description=pulumi.get(__ret__, 'description'),
group_name=pulumi.get(__ret__, 'group_name'),
METHOD_NAME=pulumi.get(__ret__, 'id'),
namespace=pulumi.get(__ret__, 'namespace'),
principal_id=pulumi.get(__ret__, 'principal_id'))
@_utilities.lift_output_func(get_quicksight_group)
def get_quicksight_group_output(aws_account_id: Optional[pulumi.Input[Optional[str]]] = None,
group_name: Optional[pulumi.Input[str]] = None,
namespace: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetQuicksightGroupResult]:
"""
This data source can be used to fetch information about a specific
QuickSight group. By using this data source, you can reference QuickSight group
properties without having to hard code ARNs or unique IDs as input.
## Example Usage
### Basic Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.quicksight.get_quicksight_group(group_name="example")
```
:param str aws_account_id: AWS account ID.
:param str group_name: The name of the group that you want to match.
The following arguments are optional:
:param str namespace: QuickSight namespace. Defaults to `default`.
"""
... |
299,816 | get backend | # Copyright (c) 2023 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from typing import Callable, Dict
import numpy as np
import openvino.runtime as ov
import pytest
import torch
from nncf.openvino.graph.layer_attributes import OVLayerAttributes
from nncf.openvino.graph.metatypes.openvino_metatypes import OVConvolutionMetatype
from nncf.openvino.graph.metatypes.openvino_metatypes import OVMatMulMetatype
from nncf.quantization.algorithms.smooth_quant.openvino_backend import OVSmoothQuantAlgoBackend
from tests.post_training.test_templates.test_smooth_quant import TemplateTestSQAlgorithm
from tests.shared.command import Command
class TestOVSQAlgorithm(TemplateTestSQAlgorithm):
@staticmethod
def fn_to_type(tensor) -> np.ndarray:
return np.array(tensor)
@staticmethod
def get_transform_fn() -> Callable:
def transform_fn(data_item):
tensor, _ = data_item
return {"input.1": tensor}
return transform_fn
@staticmethod
def METHOD_NAME() -> OVSmoothQuantAlgoBackend:
return OVSmoothQuantAlgoBackend()
@staticmethod
def backend_specific_model(model: torch.nn.Module, tmp_dir: str) -> ov.Model:
onnx_path = Path(f"{tmp_dir}/model.onnx")
torch.onnx.export(model, torch.rand(model.INPUT_SIZE), onnx_path, opset_version=13, input_names=["input.1"])
ov_path = Path(f"{tmp_dir}/model.xml")
runner = Command(f"mo -m {onnx_path} -o {tmp_dir} -n model --compress_to_fp16=False")
runner.run()
core = ov.Core()
ov_model = core.read_model(ov_path)
return ov_model
@staticmethod
def check_scales(model: ov.Model, reference_values: Dict[str, np.ndarray]) -> None:
ops_list = {op.get_friendly_name(): op for op in model.get_ops()}
for ref_name, ref_value in reference_values.items():
node = ops_list[ref_name]
const_node = node.input(1).get_source_output().get_node()
assert const_node.get_type_name() == "Constant"
value = const_node.data
ref_value = np.array(ref_value)
assert value.shape == ref_value.shape
assert np.all(np.isclose(value, ref_value, atol=0.0001)), f"{value} != {ref_value}"
@pytest.mark.parametrize(
"node_metatype, layer_attributes, port_id, reference_value",
(
(OVMatMulMetatype, OVLayerAttributes({}, inputs_attributes={"transpose": False}), 0, -1),
(OVMatMulMetatype, OVLayerAttributes({}, inputs_attributes={"transpose": True}), 0, -2),
(OVMatMulMetatype, OVLayerAttributes({}, inputs_attributes={"transpose": False}), 1, -2),
(OVMatMulMetatype, OVLayerAttributes({}, inputs_attributes={"transpose": True}), 1, -1),
(OVMatMulMetatype, OVLayerAttributes({}, inputs_attributes={"transpose": False}), 2, RuntimeError),
(OVConvolutionMetatype, OVLayerAttributes({}, inputs_attributes={}), 0, 1),
),
)
def test_get_activation_channel_axis(self, node_metatype, layer_attributes, port_id, reference_value):
return super().test_get_activation_channel_axis(node_metatype, layer_attributes, port_id, reference_value)
@pytest.mark.parametrize(
"node_metatype, layer_attributes, port_id, reference_value",
(
(OVMatMulMetatype, OVLayerAttributes({1: {"transpose": False}}), 1, -2),
(OVMatMulMetatype, OVLayerAttributes({1: {"transpose": True}}), 1, -1),
(OVMatMulMetatype, OVLayerAttributes({0: {"transpose": False}}), 0, -1),
(OVMatMulMetatype, OVLayerAttributes({0: {"transpose": True}}), 0, -2),
(OVMatMulMetatype, OVLayerAttributes({1: {"transpose": False}}), 2, RuntimeError),
(OVConvolutionMetatype, OVLayerAttributes({1: {}}), 1, 0),
),
)
def test_get_weight_channel_axis(self, node_metatype, layer_attributes, port_id, reference_value):
return super().test_get_weight_channel_axis(node_metatype, layer_attributes, port_id, reference_value)
@staticmethod
def get_matmul_metatype():
return OVMatMulMetatype |
299,817 | upgrade | # encoding: utf-8
"""Remove activity.revision_id
Revision ID: d4d9be9189fe
Revises: 01afcadbd8c0
Create Date: 2019-11-01 16:33:28.320542
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = u'd4d9be9189fe'
down_revision = u'01afcadbd8c0'
branch_labels = None
depends_on = None
def METHOD_NAME():
op.drop_constraint(u'group_revision_id_fkey', u'group',
type_=u'foreignkey')
op.drop_column(u'group', u'revision_id')
op.drop_constraint(u'group_extra_revision_id_fkey', u'group_extra',
type_=u'foreignkey')
op.drop_column(u'group_extra', u'revision_id')
op.drop_constraint(u'member_revision_id_fkey', u'member',
type_=u'foreignkey')
op.drop_column(u'member', u'revision_id')
op.drop_constraint(u'package_revision_id_fkey', u'package',
type_=u'foreignkey')
op.drop_column(u'package', u'revision_id')
op.drop_constraint(u'package_extra_revision_id_fkey', u'package_extra',
type_=u'foreignkey')
op.drop_column(u'package_extra', u'revision_id')
op.drop_constraint(u'package_relationship_revision_id_fkey',
u'package_relationship', type_=u'foreignkey')
op.drop_column(u'package_relationship', u'revision_id')
op.drop_constraint(u'package_tag_revision_id_fkey', u'package_tag',
type_=u'foreignkey')
op.drop_column(u'package_tag', u'revision_id')
op.drop_constraint(u'resource_revision_id_fkey', u'resource',
type_=u'foreignkey')
op.drop_column(u'resource', u'revision_id')
op.drop_constraint(u'system_info_revision_id_fkey', u'system_info',
type_=u'foreignkey')
op.drop_column(u'system_info', u'revision_id')
def downgrade():
op.add_column(u'system_info',
sa.Column(u'revision_id', sa.TEXT(), autoincrement=False,
nullable=True))
op.create_foreign_key(u'resource_view_resource_id_fkey', u'resource_view',
u'resource', ['resource_id'], ['id'],
onupdate=u'CASCADE', ondelete=u'CASCADE')
op.add_column(u'resource', sa.Column(u'revision_id', sa.TEXT(),
autoincrement=False, nullable=True))
op.create_foreign_key(u'resource_revision_id_fkey', u'resource',
u'revision', [u'revision_id'], ['id'])
op.add_column(u'package_tag', sa.Column(u'revision_id', sa.TEXT(),
autoincrement=False,
nullable=True))
op.create_foreign_key(u'package_tag_revision_id_fkey', u'package_tag',
u'revision', [u'revision_id'], ['id'])
op.add_column(u'package_relationship',
sa.Column(u'revision_id', sa.TEXT(), autoincrement=False,
nullable=True))
op.create_foreign_key(u'package_relationship_revision_id_fkey',
u'package_relationship', u'revision',
[u'revision_id'], ['id'])
op.add_column(u'package_extra', sa.Column(u'revision_id', sa.TEXT(),
autoincrement=False,
nullable=True))
op.create_foreign_key(u'package_extra_revision_id_fkey', u'package_extra',
u'revision', [u'revision_id'], ['id'])
op.add_column(u'package', sa.Column(u'revision_id', sa.TEXT(),
autoincrement=False, nullable=True))
op.create_foreign_key(u'package_revision_id_fkey', u'package', u'revision',
[u'revision_id'], ['id'])
op.add_column(u'member', sa.Column(u'revision_id', sa.TEXT(),
autoincrement=False, nullable=True))
op.create_foreign_key(u'member_revision_id_fkey', u'member', u'revision',
[u'revision_id'], ['id'])
op.add_column(u'group_extra', sa.Column(u'revision_id', sa.TEXT(),
autoincrement=False,
nullable=True))
op.create_foreign_key(u'group_extra_revision_id_fkey', u'group_extra',
u'revision', [u'revision_id'], ['id'])
op.add_column(u'group', sa.Column(u'revision_id', sa.TEXT(),
autoincrement=False, nullable=True))
op.create_foreign_key(u'group_revision_id_fkey', u'group', u'revision',
[u'revision_id'], ['id']) |
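# Applying or reverting this revision follows the usual Alembic flow (commands shown
# for illustration only; CKAN normally drives this through its own `ckan db` tooling):
#   alembic upgrade d4d9be9189fe    # drop the revision_id columns
#   alembic downgrade 01afcadbd8c0  # restore them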
299,818 | docs | """Nox sessions."""
import os
import shutil
import sys
from pathlib import Path
from typing import Iterable
from typing import Iterator
import nox
package = "kaskada"
python_versions = ["3.11", "3.10", "3.9"]
nox.needs_version = ">= 2021.6.6"
nox.options.sessions = (
"check-lint",
"safety",
"mypy",
"tests",
"typeguard",
"xdoctest",
"docs-build",
)
@nox.session(name="check-lint", python=python_versions[0])
def check_lint(session: nox.Session) -> None:
"""Lint."""
args = session.posargs or ["pysrc", "pytests", "docs/source"]
install(session, groups=["lint"], root=False)
session.run("black", "--check", *args)
session.run("flake8", *args)
session.run("isort", "--filter-files", "--check-only", *args)
session.run("pydocstyle", "--convention=google", "pysrc")
# No way to run this as a check.
# session.run("pyupgrade", "--py38-plus")
@nox.session(name="fix-lint", python=python_versions[0])
def fix_lint(session: nox.Session) -> None:
"""Automatically fix lint issues."""
args = session.posargs or ["pysrc", "pytests", "docs/source"]
install(session, groups=["lint"], root=False)
session.run("autoflake", "--in-place", "--remove-all-unused-imports", "--recursive", *args)
session.run("isort", "--filter-files", *args)
session.run("pyupgrade", "--py38-plus")
session.run("black", *args)
@nox.session(python=python_versions[0])
def safety(session: nox.Session) -> None:
"""Scan dependencies for insecure packages."""
# NOTE: Pass `extras` to `export_requirements` if the project supports any.
requirements = export_requirements(session)
install(session, groups=["safety"], root=False)
session.run("safety", "check", "--full-report", f"--file={requirements}")
@nox.session(python=python_versions)
def mypy(session: nox.Session) -> None:
"""Type-check using mypy."""
args = session.posargs or ["pysrc", "pytests"]
install(session, groups=["typecheck"])
# Using `--install-types` should make this less picky about missing stubs.
# However, there is a possibility it slows things down, by making mypy
# run twice -- once to determine what types need to be installed, then once
# to check things with those stubs.
session.run("mypy", "--install-types", "--non-interactive", *args)
if not session.posargs:
session.run("mypy", f"--python-executable={sys.executable}", "noxfile.py")
@nox.session(python=python_versions)
def tests(session: nox.Session) -> None:
"""Run the test suite."""
install(session, groups=["test"])
try:
session.run("coverage", "run", "--parallel", "-m", "pytest", *session.posargs)
finally:
if session.interactive:
session.notify("coverage", posargs=[])
@nox.session(python=python_versions[0])
def coverage(session: nox.Session) -> None:
"""Produce the coverage report."""
args = session.posargs or ["report"]
install(session, groups=["test"])
if not session.posargs and any(Path().glob(".coverage.*")):
session.run("coverage", "combine")
session.run("coverage", *args)
@nox.session(python=python_versions[0])
def typeguard(session: nox.Session) -> None:
"""Runtime type checking using Typeguard."""
install(session, groups=["typecheck", "test"])
session.run("pytest", f"--typeguard-packages={package}", *session.posargs)
@nox.session(python=python_versions)
def xdoctest(session: nox.Session) -> None:
"""Run examples with xdoctest."""
if session.posargs:
args = [package, *session.posargs]
else:
args = [f"--modname={package}", "--command=all"]
if "FORCE_COLOR" in os.environ:
args.append("--colored=1")
install(session, groups=["test"])
session.run("python", "-m", "xdoctest", *args)
@nox.session(name="docs-build", python=python_versions[0])
def docs_build(session: nox.Session) -> None:
"""Build the documentation."""
# ablog doesn't currently indicate whether it supports parallel reads,
# leading to a warning.
# when possible, add `"-j", "auto",` to do parallel builds (and in CI).
args = session.posargs or ["docs/source", "docs/_build", "-W"]
if not session.posargs and "FORCE_COLOR" in os.environ:
args.insert(0, "--color")
install(session, groups=["typecheck", "docs"])
build_dir = Path("docs", "_build")
if build_dir.exists():
shutil.rmtree(build_dir)
session.run("sphinx-build", *args)
@nox.session(python=python_versions[0])
def METHOD_NAME(session: nox.Session) -> None:
"""Build and serve the documentation with live reloading on file changes."""
args = ["--open-browser", "docs/source", "docs/_build", "-j", "auto", "--ignore", "*/apidocs/*", "--watch", "pysrc/kaskada"]
install(session, groups=["typecheck", "docs"])
build_dir = Path("docs", "_build")
if build_dir.exists():
shutil.rmtree(build_dir)
session.run("sphinx-autobuild", *args)
def install(session: nox.Session, *, groups: Iterable[str], root: bool = True) -> None:
"""Install the dependency groups using Poetry.
This function installs the given dependency groups into the session's
virtual environment. When ``root`` is true (the default), the function
also installs the root package's default dependencies.
The root package is installed using `maturin develop`.
Args:
session: The Session object.
groups: The dependency groups to install.
root: Install the root package.
"""
session.run_always(
"poetry",
"install",
"--no-root",
"--sync",
"--{}={}".format("only" if not root else "with", ",".join(groups)),
external=True,
)
if root:
session.run_always("maturin", "develop", "--profile", "dev")
def export_requirements(session: nox.Session, *, extras: Iterable[str] = ()) -> Path:
"""Export a requirements file from Poetry.
This function uses ``poetry export`` to generate a requirements file
containing the default dependencies at the versions specified in
``poetry.lock``.
Args:
session: The Session object.
extras: Extras supported by the project.
Returns:
The path to the requirements file.
"""
# XXX Use poetry-export-plugin with dependency groups
output = session.run_always(
"poetry",
"export",
"--format=requirements.txt",
"--without-hashes",
*[f"--extras={extra}" for extra in extras],
external=True,
silent=True,
stderr=None,
)
if output is None:
session.skip(
"The command `poetry export` was not executed"
" (a possible cause is specifying `--no-install`)"
)
assert isinstance(output, str) # noqa: S101
def _stripwarnings(lines: Iterable[str]) -> Iterator[str]:
for line in lines:
if line.startswith("Warning:"):
print(line, file=sys.stderr)
continue
yield line
text = "".join(_stripwarnings(output.splitlines(keepends=True)))
path = session.cache_dir / "requirements.txt"
path.write_text(text)
    return path
299,819 | update conda forge config | import shutil
import tempfile
import io
import jinja2
import datetime
import time
import os
import sys
from pathlib import Path
from collections import defaultdict
from contextlib import contextmanager
import ruamel.yaml
def get_feedstock_name_from_meta(meta):
"""Resolve the feedtstock name from the parsed meta.yaml."""
if "feedstock-name" in meta.meta["extra"]:
return meta.meta["extra"]["feedstock-name"]
elif "parent_recipe" in meta.meta["extra"]:
return meta.meta["extra"]["parent_recipe"]["name"]
else:
return meta.name()
def get_feedstock_about_from_meta(meta) -> dict:
"""Fetch the feedtstock about from the parsed meta.yaml."""
# it turns out that conda_build would not preserve the feedstock about:
# - if a subpackage does not have about, it uses the feedstock's
# - if a subpackage has about, it's used as is
# therefore we need to parse the yaml again just to get the about section...
if "parent_recipe" in meta.meta["extra"]:
recipe_meta = os.path.join(
meta.meta["extra"]["parent_recipe"]["path"], "meta.yaml"
)
with io.open(recipe_meta, "rt") as fh:
content = render_meta_yaml("".join(fh))
meta = get_yaml().load(content)
return dict(meta["about"])
else:
# no parent recipe for any reason, use self's about
return dict(meta.meta["about"])
def get_yaml():
# define global yaml API
# roundrip-loader and allowing duplicate keys
# for handling # [filter] / # [not filter]
# Don't use a global variable for this as a global
# variable will make conda-smithy thread unsafe.
yaml = ruamel.yaml.YAML(typ="rt")
yaml.allow_duplicate_keys = True
return yaml
@contextmanager
def tmp_directory():
tmp_dir = tempfile.mkdtemp("_recipe")
yield tmp_dir
shutil.rmtree(tmp_dir)
class NullUndefined(jinja2.Undefined):
def __unicode__(self):
return self._undefined_name
def __getattr__(self, name):
return "{}.{}".format(self, name)
def __getitem__(self, name):
return '{}["{}"]'.format(self, name)
class MockOS(dict):
def __init__(self):
self.environ = defaultdict(lambda: "")
self.sep = "/"
def stub_compatible_pin(*args, **kwargs):
return f"compatible_pin {args[0]}"
def stub_subpackage_pin(*args, **kwargs):
return f"subpackage_pin {args[0]}"
def render_meta_yaml(text):
env = jinja2.Environment(undefined=NullUndefined)
# stub out cb3 jinja2 functions - they are not important for linting
# if we don't stub them out, the ruamel.yaml load fails to interpret them
# we can't just use conda-build's api.render functionality, because it would apply selectors
env.globals.update(
dict(
compiler=lambda x: x + "_compiler_stub",
pin_subpackage=stub_subpackage_pin,
pin_compatible=stub_compatible_pin,
cdt=lambda *args, **kwargs: "cdt_stub",
load_file_regex=lambda *args, **kwargs: defaultdict(lambda: ""),
datetime=datetime,
time=time,
target_platform="linux-64",
mpi="mpi",
)
)
mockos = MockOS()
py_ver = "3.7"
context = {"os": mockos, "environ": mockos.environ, "PY_VER": py_ver}
content = env.from_string(text).render(context)
return content
@contextmanager
def METHOD_NAME(forge_yaml):
"""Utility method used to update conda forge configuration files
Uage:
>>> with update_conda_forge_config(somepath) as cfg:
... cfg['foo'] = 'bar'
"""
if os.path.exists(forge_yaml):
with open(forge_yaml, "r") as fh:
code = get_yaml().load(fh)
else:
code = {}
# Code could come in as an empty list.
if not code:
code = {}
yield code
get_yaml().dump(code, Path(forge_yaml))
def merge_dict(src, dest):
"""Recursive merge dictionary"""
for key, value in src.items():
if isinstance(value, dict):
# get node or create one
node = dest.setdefault(key, {})
merge_dict(value, node)
else:
dest[key] = value
return dest |
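# merge_dict merges `src` into `dest` in place (and also returns `dest`): nested dicts
# are merged recursively, while scalar values from `src` overwrite those in `dest`, e.g.
#   merge_dict({"a": {"x": 1}}, {"a": {"y": 2}, "b": 3}) -> {"a": {"y": 2, "x": 1}, "b": 3}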
299,820 | to str | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Submarine API
    The Submarine REST API allows you to access Submarine resources such as experiments, environments and notebooks. The API is hosted under the /v1 path on the Submarine server. For example, to list experiments on a server hosted at http://localhost:8080, access http://localhost:8080/api/v1/experiment/ # noqa: E501
The version of the OpenAPI document: 0.9.0-SNAPSHOT
Contact: dev@submarine.apache.org
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from submarine.client.configuration import Configuration
class JsonResponse(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {'attributes': 'dict(str, object)', 'code': 'int', 'result': 'object', 'success': 'bool'}
attribute_map = {'attributes': 'attributes', 'code': 'code', 'result': 'result', 'success': 'success'}
def __init__(
self, attributes=None, code=None, result=None, success=None, local_vars_configuration=None
): # noqa: E501
"""JsonResponse - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._attributes = None
self._code = None
self._result = None
self._success = None
self.discriminator = None
if attributes is not None:
self.attributes = attributes
if code is not None:
self.code = code
if result is not None:
self.result = result
if success is not None:
self.success = success
@property
def attributes(self):
"""Gets the attributes of this JsonResponse. # noqa: E501
:return: The attributes of this JsonResponse. # noqa: E501
:rtype: dict(str, object)
"""
return self._attributes
@attributes.setter
def attributes(self, attributes):
"""Sets the attributes of this JsonResponse.
:param attributes: The attributes of this JsonResponse. # noqa: E501
:type: dict(str, object)
"""
self._attributes = attributes
@property
def code(self):
"""Gets the code of this JsonResponse. # noqa: E501
:return: The code of this JsonResponse. # noqa: E501
:rtype: int
"""
return self._code
@code.setter
def code(self, code):
"""Sets the code of this JsonResponse.
:param code: The code of this JsonResponse. # noqa: E501
:type: int
"""
self._code = code
@property
def result(self):
"""Gets the result of this JsonResponse. # noqa: E501
:return: The result of this JsonResponse. # noqa: E501
:rtype: object
"""
return self._result
@result.setter
def result(self, result):
"""Sets the result of this JsonResponse.
:param result: The result of this JsonResponse. # noqa: E501
:type: object
"""
self._result = result
@property
def success(self):
"""Gets the success of this JsonResponse. # noqa: E501
:return: The success of this JsonResponse. # noqa: E501
:rtype: bool
"""
return self._success
@success.setter
def success(self, success):
"""Sets the success of this JsonResponse.
:param success: The success of this JsonResponse. # noqa: E501
:type: bool
"""
self._success = success
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item,
value.items(),
)
)
else:
result[attr] = value
return result
def METHOD_NAME(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.METHOD_NAME()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, JsonResponse):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, JsonResponse):
return True
return self.to_dict() != other.to_dict() |
299,821 | prop descriptions | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Title(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "parcoords.line.colorbar"
_path_str = "parcoords.line.colorbar.title"
_valid_props = {"font", "side", "text"}
# font
# ----
@property
def font(self):
"""
Sets this color bar's title font. Note that the title's font
used to be set by the now deprecated `titlefont` attribute.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.parcoords.line.colorbar.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.parcoords.line.colorbar.title.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# side
# ----
@property
def side(self):
"""
Determines the location of color bar's title with respect to
the color bar. Defaults to "top" when `orientation` if "v" and
defaults to "right" when `orientation` if "h". Note that the
title's location used to be set by the now deprecated
`titleside` attribute.
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
Any
"""
return self["side"]
@side.setter
def side(self, val):
self["side"] = val
# text
# ----
@property
def text(self):
"""
Sets the title of the color bar. Note that before the existence
of `title.text`, the title's contents used to be defined as the
`title` attribute itself. This behavior has been deprecated.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
# Self properties description
# ---------------------------
@property
def METHOD_NAME(self):
return """\
font
Sets this color bar's title font. Note that the title's
font used to be set by the now deprecated `titlefont`
attribute.
side
Determines the location of color bar's title with
respect to the color bar. Defaults to "top" when
`orientation` if "v" and defaults to "right" when
`orientation` if "h". Note that the title's location
used to be set by the now deprecated `titleside`
attribute.
text
Sets the title of the color bar. Note that before the
existence of `title.text`, the title's contents used to
be defined as the `title` attribute itself. This
behavior has been deprecated.
"""
def __init__(self, arg=None, font=None, side=None, text=None, **kwargs):
"""
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.parcoords.line
.colorbar.Title`
font
Sets this color bar's title font. Note that the title's
font used to be set by the now deprecated `titlefont`
attribute.
side
Determines the location of color bar's title with
respect to the color bar. Defaults to "top" when
`orientation` if "v" and defaults to "right" when
`orientation` if "h". Note that the title's location
used to be set by the now deprecated `titleside`
attribute.
text
Sets the title of the color bar. Note that before the
existence of `title.text`, the title's contents used to
be defined as the `title` attribute itself. This
behavior has been deprecated.
Returns
-------
Title
"""
super(Title, self).__init__("title")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.parcoords.line.colorbar.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.parcoords.line.colorbar.Title`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("font", None)
_v = font if font is not None else _v
if _v is not None:
self["font"] = _v
_v = arg.pop("side", None)
_v = side if side is not None else _v
if _v is not None:
self["side"] = _v
_v = arg.pop("text", None)
_v = text if text is not None else _v
if _v is not None:
self["text"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False |
299,822 | signal handler | # Copyright 2021 Hathor Labs
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import datetime
import math
import signal
import sys
import time
from argparse import ArgumentParser, Namespace
from json.decoder import JSONDecodeError
from multiprocessing import Process, Queue
import requests
_SLEEP_ON_ERROR_SECONDS = 5
_MAX_CONN_RETRIES = math.inf
def METHOD_NAME(sig, frame):
sys.exit(0)
def worker(q_in, q_out):
signal.signal(signal.SIGINT, METHOD_NAME)
block, start, end, sleep_seconds = q_in.get()
block.start_mining(start, end, sleep_seconds=sleep_seconds)
q_out.put(block)
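# The worker runs in a separate process: the parent puts a single
# (block, start_nonce, end_nonce, sleep_seconds) tuple on q_in, the child mines that
# nonce range and puts the block back on q_out, so every block template is mined by a
# fresh process that can be interrupted cleanly via SIGINT.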
def create_parser() -> ArgumentParser:
from hathor.cli.util import create_parser
parser = create_parser()
parser.add_argument('url', help='URL to get mining bytes')
parser.add_argument('--init-delay', type=float, help='Wait N seconds before starting (in seconds)', default=None)
parser.add_argument('--sleep', type=float, help='Sleep every 2 seconds (in seconds)')
parser.add_argument('--count', type=int, help='Quantity of blocks to be mined')
parser.add_argument('--address', help='Address to mine blocks')
return parser
def execute(args: Namespace) -> None:
from requests.exceptions import ConnectionError
from hathor.transaction import Block
from hathor.transaction.exceptions import HathorError
print('Hathor CPU Miner v1.0.0')
print('URL: {}'.format(args.url))
if args.init_delay:
print('Init delay {} seconds'.format(args.init_delay))
time.sleep(args.init_delay)
signal.signal(signal.SIGINT, METHOD_NAME)
sleep_seconds = 0
if args.sleep:
sleep_seconds = args.sleep
total = 0
conn_retries = 0
q_in: Queue[tuple[Block, int, int, int]]
q_out: Queue[Block]
q_in, q_out = Queue(), Queue()
while True:
print('Requesting mining information...')
try:
params = {}
if args.address:
params['address'] = args.address
response = requests.get(args.url, params=params)
except ConnectionError as e:
print('Error connecting to server: {}'.format(args.url))
print(e)
if conn_retries >= _MAX_CONN_RETRIES:
print('Too many connection failures, giving up.')
sys.exit(1)
else:
conn_retries += 1
print('Waiting {} seconds to try again ({} of {})...'.format(_SLEEP_ON_ERROR_SECONDS, conn_retries,
_MAX_CONN_RETRIES))
time.sleep(_SLEEP_ON_ERROR_SECONDS)
continue
else:
conn_retries = 0
if response.status_code == 503:
print('Node still syncing. Waiting {} seconds to try again...'.format(_SLEEP_ON_ERROR_SECONDS))
time.sleep(_SLEEP_ON_ERROR_SECONDS)
continue
try:
data = response.json()
except JSONDecodeError as e:
print('Error reading response from server: {}'.format(response))
print(e)
print('Waiting {} seconds to try again...'.format(_SLEEP_ON_ERROR_SECONDS))
time.sleep(_SLEEP_ON_ERROR_SECONDS)
continue
if 'block_bytes' not in data:
print('Something is wrong in the response.')
print(data)
time.sleep(_SLEEP_ON_ERROR_SECONDS)
continue
block_bytes = base64.b64decode(data['block_bytes'])
block = Block.create_from_struct(block_bytes)
assert block.hash is not None
assert isinstance(block, Block)
print('Mining block with weight {}'.format(block.weight))
p = Process(target=worker, args=(q_in, q_out))
p.start()
q_in.put((block, 0, 2**32, sleep_seconds))
p.join()
block = q_out.get()
block.update_hash()
assert block.hash is not None
print('[{}] New block found: {} (nonce={}, weight={})'.format(datetime.datetime.now(), block.hash.hex(),
block.nonce, block.weight))
try:
block.verify_without_storage()
except HathorError:
print('[{}] ERROR: Block has not been pushed because it is not valid.'.format(datetime.datetime.now()))
else:
block_bytes = block.get_struct()
response = requests.post(args.url, json={'block_bytes': base64.b64encode(block_bytes).decode('utf-8')})
if not response.ok:
print('[{}] ERROR: Block has been rejected. Unknown exception.'.format(datetime.datetime.now()))
if response.ok and response.text != '1':
print('[{}] ERROR: Block has been rejected.'.format(datetime.datetime.now()))
print('')
total += 1
if args.count and total == args.count:
break
def main():
parser = create_parser()
args = parser.parse_args()
execute(args) |
299,823 | tobytes | #
# The Python Imaging Library.
# $Id$
#
# image palette object
#
# History:
# 1996-03-11 fl Rewritten.
# 1997-01-03 fl Up and running.
# 1997-08-23 fl Added load hack
# 2001-04-16 fl Fixed randint shadow bug in random()
#
# Copyright (c) 1997-2001 by Secret Labs AB
# Copyright (c) 1996-1997 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import array
from . import GimpGradientFile, GimpPaletteFile, ImageColor, PaletteFile
class ImagePalette(object):
"""
Color palette for palette mapped images
:param mode: The mode to use for the Palette. See:
:ref:`concept-modes`. Defaults to "RGB"
:param palette: An optional palette. If given, it must be a bytearray,
an array or a list of ints between 0-255 and of length ``size``
times the number of colors in ``mode``. The list must be aligned
by channel (All R values must be contiguous in the list before G
and B values.) Defaults to 0 through 255 per channel.
:param size: An optional palette size. If given, it cannot be equal to
or greater than 256. Defaults to 0.
"""
def __init__(self, mode="RGB", palette=None, size=0):
self.mode = mode
self.rawmode = None # if set, palette contains raw data
self.palette = palette or bytearray(range(256)) * len(self.mode)
self.colors = {}
self.dirty = None
if (size == 0 and len(self.mode) * 256 != len(self.palette)) or (
size != 0 and size != len(self.palette)
):
raise ValueError("wrong palette size")
def copy(self):
new = ImagePalette()
new.mode = self.mode
new.rawmode = self.rawmode
if self.palette is not None:
new.palette = self.palette[:]
new.colors = self.colors.copy()
new.dirty = self.dirty
return new
def getdata(self):
"""
Get palette contents in format suitable for the low-level
``im.putpalette`` primitive.
.. warning:: This method is experimental.
"""
if self.rawmode:
return self.rawmode, self.palette
return self.mode + ";L", self.METHOD_NAME()
def METHOD_NAME(self):
"""Convert palette to bytes.
.. warning:: This method is experimental.
"""
if self.rawmode:
raise ValueError("palette contains raw palette data")
if isinstance(self.palette, bytes):
return self.palette
arr = array.array("B", self.palette)
if hasattr(arr, "tobytes"):
return arr.METHOD_NAME()
return arr.tostring()
# Declare tostring as an alias for tobytes
tostring = METHOD_NAME
def getcolor(self, color):
"""Given an rgb tuple, allocate palette entry.
.. warning:: This method is experimental.
"""
if self.rawmode:
raise ValueError("palette contains raw palette data")
if isinstance(color, tuple):
try:
return self.colors[color]
except KeyError:
# allocate new color slot
if isinstance(self.palette, bytes):
self.palette = bytearray(self.palette)
index = len(self.colors)
if index >= 256:
raise ValueError("cannot allocate more than 256 colors")
self.colors[color] = index
self.palette[index] = color[0]
self.palette[index + 256] = color[1]
self.palette[index + 512] = color[2]
self.dirty = 1
return index
else:
raise ValueError("unknown color specifier: %r" % color)
def save(self, fp):
"""Save palette to text file.
.. warning:: This method is experimental.
"""
if self.rawmode:
raise ValueError("palette contains raw palette data")
if isinstance(fp, str):
fp = open(fp, "w")
fp.write("# Palette\n")
fp.write("# Mode: %s\n" % self.mode)
for i in range(256):
fp.write("%d" % i)
for j in range(i * len(self.mode), (i + 1) * len(self.mode)):
try:
fp.write(" %d" % self.palette[j])
except IndexError:
fp.write(" 0")
fp.write("\n")
fp.close()
# --------------------------------------------------------------------
# Internal
def raw(rawmode, data):
palette = ImagePalette()
palette.rawmode = rawmode
palette.palette = data
palette.dirty = 1
return palette
# --------------------------------------------------------------------
# Factories
def make_linear_lut(black, white):
lut = []
if black == 0:
for i in range(256):
lut.append(white * i // 255)
else:
raise NotImplementedError # FIXME
return lut
def make_gamma_lut(exp):
lut = []
for i in range(256):
lut.append(int(((i / 255.0) ** exp) * 255.0 + 0.5))
return lut
def negative(mode="RGB"):
palette = list(range(256))
palette.reverse()
return ImagePalette(mode, palette * len(mode))
def random(mode="RGB"):
from random import randint
palette = []
for i in range(256 * len(mode)):
palette.append(randint(0, 255))
return ImagePalette(mode, palette)
def sepia(white="#fff0c0"):
r, g, b = ImageColor.getrgb(white)
r = make_linear_lut(0, r)
g = make_linear_lut(0, g)
b = make_linear_lut(0, b)
return ImagePalette("RGB", r + g + b)
def wedge(mode="RGB"):
return ImagePalette(mode, list(range(256)) * len(mode))
def load(filename):
# FIXME: supports GIMP gradients only
with open(filename, "rb") as fp:
for paletteHandler in [
GimpPaletteFile.GimpPaletteFile,
GimpGradientFile.GimpGradientFile,
PaletteFile.PaletteFile,
]:
try:
fp.seek(0)
lut = paletteHandler(fp).getpalette()
if lut:
break
except (SyntaxError, ValueError):
# import traceback
# traceback.print_exc()
pass
else:
raise IOError("cannot load palette")
return lut # data, rawmode |
299,824 | run dir | """Importable config object."""
import os
from datetime import datetime
from pathlib import Path
from types import MappingProxyType
from typing import Optional, Union
import yaml
import esmvalcore
from esmvalcore.cmor.check import CheckLevels
from ._config_validators import (
_deprecated_options_defaults,
_deprecators,
_validators,
)
from ._validated_config import ValidatedConfig
URL = ('https://docs.esmvaltool.org/projects/'
'ESMValCore/en/latest/quickstart/configure.html')
class Config(ValidatedConfig):
"""ESMValTool configuration object.
Do not instantiate this class directly, but use
:obj:`esmvalcore.config.CFG` instead.
"""
_validate = _validators
_deprecate = _deprecators
_deprecated_defaults = _deprecated_options_defaults
_warn_if_missing = (
('drs', URL),
('rootpath', URL),
)
@classmethod
def _load_user_config(cls,
filename: Union[os.PathLike, str],
raise_exception: bool = True):
"""Load user configuration from the given file.
The config is cleared and updated in-place.
Parameters
----------
filename: pathlike
Name of the config file, must be yaml format
raise_exception : bool
Raise an exception if `filename` can not be found (default).
Otherwise, silently pass and use the default configuration. This
setting is necessary for the case where
`.esmvaltool/config-user.yml` has not been defined (i.e. first
start).
"""
new = cls()
try:
mapping = _read_config_file(filename)
mapping['config_file'] = filename
except IOError:
if raise_exception:
raise
mapping = {}
new.update(CFG_DEFAULT)
new.update(mapping)
new.check_missing()
return new
@classmethod
def _load_default_config(cls, filename: Union[os.PathLike, str]):
"""Load the default configuration."""
new = cls()
mapping = _read_config_file(filename)
# Add defaults that are not available in esmvalcore/config-user.yml
mapping['check_level'] = CheckLevels.DEFAULT
mapping['config_file'] = filename
mapping['diagnostics'] = None
mapping['extra_facets_dir'] = tuple()
mapping['max_datasets'] = None
mapping['max_years'] = None
mapping['resume_from'] = []
mapping['run_diagnostic'] = True
mapping['skip_nonexistent'] = False
new.update(mapping)
return new
def load_from_file(
self,
filename: Optional[Union[os.PathLike, str]] = None,
) -> None:
"""Load user configuration from the given file."""
if filename is None:
filename = USER_CONFIG
path = Path(filename).expanduser()
if not path.exists():
try_path = USER_CONFIG_DIR / filename
if try_path.exists():
path = try_path
else:
                raise FileNotFoundError(f'Cannot find: `{filename}` '
f'locally or in `{try_path}`')
self.clear()
self.update(Config._load_user_config(path))
def reload(self):
"""Reload the config file."""
filename = self.get('config_file', DEFAULT_CONFIG)
self.load_from_file(filename)
def start_session(self, name: str):
"""Start a new session from this configuration object.
Parameters
----------
name: str
Name of the session.
Returns
-------
Session
"""
return Session(config=self.copy(), name=name)
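# A hedged usage sketch (the session name is a placeholder): a Session derives
# its whole output tree from the configured output_dir plus a timestamped name.
#   session = CFG.start_session('recipe_example')
#   session.session_dir   # <output_dir>/recipe_example_<YYYYMMDD_HHMMSS>
#   session.preproc_dir   # <session_dir>/preproc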
class Session(ValidatedConfig):
"""Container class for session configuration and directory information.
Do not instantiate this class directly, but use
:obj:`CFG.start_session` instead.
Parameters
----------
config : dict
Dictionary with configuration settings.
name : str
Name of the session to initialize, for example, the name of the
recipe (default='session').
"""
_validate = _validators
_deprecate = _deprecators
_deprecated_defaults = _deprecated_options_defaults
relative_preproc_dir = Path('preproc')
relative_work_dir = Path('work')
relative_plot_dir = Path('plots')
relative_run_dir = Path('run')
relative_main_log = Path('run', 'main_log.txt')
relative_main_log_debug = Path('run', 'main_log_debug.txt')
_relative_fixed_file_dir = Path('preproc', 'fixed_files')
def __init__(self, config: dict, name: str = 'session'):
super().__init__(config)
self.session_name: Union[str, None] = None
self.set_session_name(name)
def set_session_name(self, name: str = 'session'):
"""Set the name for the session.
The `name` is used to name the session directory, e.g.
`session_20201208_132800/`. The date is suffixed automatically.
"""
now = datetime.utcnow().strftime("%Y%m%d_%H%M%S")
self.session_name = f"{name}_{now}"
@property
def session_dir(self):
"""Return session directory."""
return self['output_dir'] / self.session_name
@property
def preproc_dir(self):
"""Return preproc directory."""
return self.session_dir / self.relative_preproc_dir
@property
def work_dir(self):
"""Return work directory."""
return self.session_dir / self.relative_work_dir
@property
def plot_dir(self):
"""Return plot directory."""
return self.session_dir / self.relative_plot_dir
@property
def METHOD_NAME(self):
"""Return run directory."""
return self.session_dir / self.relative_run_dir
@property
def config_dir(self):
"""Return user config directory."""
return USER_CONFIG_DIR
@property
def main_log(self):
"""Return main log file."""
return self.session_dir / self.relative_main_log
@property
def main_log_debug(self):
"""Return main log debug file."""
return self.session_dir / self.relative_main_log_debug
@property
def _fixed_file_dir(self):
"""Return fixed file directory."""
return self.session_dir / self._relative_fixed_file_dir
def _read_config_file(config_file):
"""Read config user file and store settings in a dictionary."""
config_file = Path(config_file)
if not config_file.exists():
raise IOError(f'Config file `{config_file}` does not exist.')
with open(config_file, 'r') as file:
cfg = yaml.safe_load(file)
return cfg
DEFAULT_CONFIG_DIR = Path(esmvalcore.__file__).parent
DEFAULT_CONFIG = DEFAULT_CONFIG_DIR / 'config-user.yml'
USER_CONFIG_DIR = Path.home() / '.esmvaltool'
USER_CONFIG = USER_CONFIG_DIR / 'config-user.yml'
# initialize placeholders
CFG_DEFAULT = MappingProxyType(Config._load_default_config(DEFAULT_CONFIG))
CFG = Config._load_user_config(USER_CONFIG, raise_exception=False) |
299,825 | calculate seeds | #!/usr/bin/env python3
# Copyright (C) 2021 David Guillen Fandos
# Parses a CHT file and attempts to decrypt any cheats that might be encrypted
# Will just copy the contents should the cheats be unencrypted.
# This will effectively remove any master cheat if it's only used to encrypt
import sys, re, struct
def ror(v, a):
return ((v >> a) | (v << (32 - a))) & 0xffffffff
def ishex(s):
return all(x.upper() in "0123456789ABCDEF" for x in s)
# Generates 32 bits out of the LFSR by combining three step outputs
def lfsr_advance(state0):
state1 = (state0 * 0x41C64E6D + 0x3039) & 0xffffffff
state2 = (state1 * 0x41C64E6D + 0x3039) & 0xffffffff
state3 = (state2 * 0x41C64E6D + 0x3039) & 0xffffffff
# Combine the three states into one
return (((state1 << 14) & 0xC0000000) |
((state2 >> 1) & 0x3FFF8000) |
((state3 >> 16) & 0x00007FFF), state3)
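# Sanity sketch for the bit packing above (hypothetical asserts, not part of
# this script): the three masks are disjoint and together cover all 32 bits.
#   assert 0xC0000000 | 0x3FFF8000 | 0x00007FFF == 0xFFFFFFFF
#   assert 0xC0000000 & 0x3FFF8000 == 0 and 0x3FFF8000 & 0x00007FFF == 0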
def next_tblidx(lfsr_state):
roll, lfsr_state = lfsr_advance(lfsr_state)
count = 48
if roll == count:
roll = 0
if roll < count:
return roll, lfsr_state
bit = 1
while count < 0x10000000 and count < roll:
count = (count << 4) & 0xFFFFFFFF
bit = (bit << 4) & 0xFFFFFFFF
while count < 0x80000000 and count < roll:
count = (count << 1) & 0xFFFFFFFF
bit = (bit << 1) & 0xFFFFFFFF
while True:
mask = 0
if roll >= count:
roll -= count
if roll >= (count >> 1):
roll -= (count >> 1)
mask |= ror(bit, 1)
if roll >= (count >> 2):
roll -= (count >> 2)
mask |= ror(bit, 2)
if roll >= (count >> 3):
roll -= (count >> 3)
mask |= ror(bit, 3)
if roll == 0 or (bit >> 4) == 0:
break
bit >>= 4
count >>= 4
mask &= 0xE0000000
if mask == 0 or (bit & 7) == 0:
return roll, lfsr_state
if mask & ror(bit, 3):
roll += (count >> 3)
if mask & ror(bit, 2):
roll += (count >> 2)
if mask & ror(bit, 1):
roll += (count >> 1)
return roll, lfsr_state
def decrypt(addr, val, encdata):
buf = list(struct.pack(">IH", addr, val))
deckey, tbldata, seeds = encdata
for i in range(47, -1, -1):
off1 = i >> 3
off2 = tbldata[i] >> 3
bit1 = i & 7
bit2 = tbldata[i] & 7
# Extract the indicated bits
p1 = (buf[off1] >> bit1) & 1
p2 = (buf[off2] >> bit2) & 1
# Swap bits, first clear then set if necessary
buf[off1] &= ~(1 << bit1)
buf[off2] &= ~(1 << bit2)
buf[off1] |= (p2 << bit1)
buf[off2] |= (p1 << bit2)
# Xor decrypt with the calculated values
s1 = struct.pack(">IH", seeds[0], seeds[1] & 0xffff)
buf = [a ^ b for (a, b) in zip(buf, s1)]
for i in range(5):
buf[i] ^= (((deckey >> 8) ^ buf[i+1]) & 0xff)
buf[5] ^= ((deckey >> 8) & 0xff)
for i in range(5, 0, -1):
buf[i] ^= ((deckey ^ buf[i-1]) & 0xff)
buf[0] ^= (deckey & 0xff)
s2 = struct.pack(">IH", seeds[2], seeds[3] & 0xffff)
buf = bytes(a ^ b for (a, b) in zip(buf, s2))
return struct.unpack(">IH", buf)
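# A hedged end-to-end sketch (the hex values are placeholders, not real cheat
# codes): a master code whose address starts with hex digit 9 seeds the state,
# and every following encrypted code is mapped back to a plain address/value.
#   encdata = METHOD_NAME(0x9266BA6C, 0x2B6A)
#   plain_addr, plain_val = decrypt(0xDEADBEEF, 0x1234, encdata)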
def METHOD_NAME(addr, val):
tbl = list(range(48))
rngstate = (val & 0xff) ^ 0x1111
# Performs some table swaps based on the code
for i in range(80):
p1, rngstate = next_tblidx(rngstate)
p2, rngstate = next_tblidx(rngstate)
tbl[p1], tbl[p2] = tbl[p2], tbl[p1]
# Reinitialize the RNG now to a fixed value and draw a variable number
rngstate = 0x4EFAD1C3
for i in range((addr >> 24) & 15):
# Yeah this is on purpose, the output wired to the state
rngstate, _ = lfsr_advance(rngstate)
seed2, rngstate = lfsr_advance(rngstate)
seed3, rngstate = lfsr_advance(rngstate)
# Do it again, super secure stuff :P
rngstate = (val >> 8) ^ 0xF254
for i in range(val >> 8):
# Yeah this is on purpose, the output wired to the state
rngstate, _ = lfsr_advance(rngstate)
seed0, rngstate = lfsr_advance(rngstate)
seed1, rngstate = lfsr_advance(rngstate)
return (addr, tbl, [seed0, seed1, seed2, seed3])
with open(sys.argv[1]) as ifd:
vrs = {}
for line in ifd.read().split("\n"):
m = re.match("^([^\s=]+)\s*=\s*\"([^\"]+)\"$", line.strip())
if m:
vrs[m.group(1)] = m.group(2)
else:
m = re.match("^([^\s=]+)\s*=\s*([^\s=]+)$", line.strip())
if m:
vrs[m.group(1)] = m.group(2)
assert "cheats" in vrs
outtext = "cheats = %s\n\n" % vrs["cheats"]
encdata = None
for i in range(int(vrs["cheats"])):
cdesc = vrs["cheat%d_desc" % i]
ccode = vrs["cheat%d_code" % i].upper()
cenab = vrs["cheat%d_enable" % i]
m = ccode.split("+")
if not all(len(x) == 12 and ishex(x) for x in m):
m = ccode.split(" ")
if not all(len(x) == 12 and ishex(x) for x in m):
m = re.findall("[0-9a-fA-F]{8}[\+ ][0-9a-fA-F]{4}", ccode)
if not m:
print("Bad code", ccode)
sys.exit(1)
ocodes = []
for c in m:
if "+" in c:
adrs, val = c.split("+")
elif " " in c:
adrs, val = c.split(" ")
else:
adrs, val = c[:8], c[8:]
addr, val = int(adrs, 16), int(val, 16)
if encdata:
# Decode the data first!
addr, val = decrypt(addr, val, encdata)
elif adrs[0] == '9':
# Update encryption data, next codes must be encrypted
encdata = METHOD_NAME(addr, val)
#print("Change code", c, encdata)
continue # Skip this code since it's now useless
finalcode = "%08x+%04x" % (addr, val)
ocodes.append(finalcode)
# Update code!
vrs["cheat%d_code" % i] = "+".join(ocodes).upper()
outtext += 'cheat%d_desc = "%s"\n' % (i, vrs["cheat%d_desc" % i])
outtext += 'cheat%d_code = "%s"\n' % (i, vrs["cheat%d_code" % i])
outtext += 'cheat%d_enable = false\n\n' % i
with open(sys.argv[1], "w") as ofd:
ofd.write(outtext)
|
299,826 | run | #!/usr/bin/env python
'''
Ironhouse extends Stonehouse with client public key authentication.
This is the strongest security model we have today, protecting against every
attack we know about, except end-point attacks (where an attacker plants
spyware on a machine to capture data before it's encrypted, or after it's
decrypted).
This example demonstrates using the IOLoopAuthenticator.
Author: Chris Laws
'''
import asyncio
import logging
import os
import sys
from typing import List
from tornado import ioloop
import zmq
import zmq.auth
from zmq.auth.asyncio import AsyncioAuthenticator
from zmq.eventloop import zmqstream
def echo(server: zmqstream.ZMQStream, msg: List[bytes]) -> None:
logging.debug("server recvd %s", msg)
reply = msg + [b'World']
logging.debug("server sending %s", reply)
server.send_multipart(reply)
def setup_server(server_secret_file: str, endpoint: str = 'tcp://127.0.0.1:9000'):
"""setup a simple echo server with CURVE auth"""
server = zmq.Context.instance().socket(zmq.ROUTER)
server_public, server_secret = zmq.auth.load_certificate(server_secret_file)
server.curve_secretkey = server_secret
server.curve_publickey = server_public
server.curve_server = True # must come before bind
server.bind(endpoint)
server_stream = zmqstream.ZMQStream(server)
# simple echo
server_stream.on_recv_stream(echo)
return server_stream
def client_msg_recvd(msg: List[bytes]):
logging.debug("client recvd %s", msg)
logging.info("Ironhouse test OK")
# stop the loop when we get the reply
ioloop.IOLoop.current().stop()
def setup_client(
client_secret_file: str,
server_public_file: str,
endpoint: str = 'tcp://127.0.0.1:9000',
):
"""setup a simple client with CURVE auth"""
client = zmq.Context.instance().socket(zmq.DEALER)
# We need two certificates, one for the client and one for
# the server. The client must know the server's public key
# to make a CURVE connection.
client_public, client_secret = zmq.auth.load_certificate(client_secret_file)
client.curve_secretkey = client_secret
client.curve_publickey = client_public
server_public, _ = zmq.auth.load_certificate(server_public_file)
# The client must know the server's public key to make a CURVE connection.
client.curve_serverkey = server_public
client.connect(endpoint)
client_stream = zmqstream.ZMQStream(client)
client_stream.on_recv(client_msg_recvd)
return client_stream
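# A minimal sketch of the key material this example expects on disk; it is an
# assumption that the referenced generate_certificates script does roughly the
# same thing (the helper name below is made up).
def generate_test_certificates(base_dir: str) -> None:
    """Create server and client CURVE certificates under base_dir."""
    keys_dir = os.path.join(base_dir, 'certificates')
    public_keys_dir = os.path.join(base_dir, 'public_keys')
    secret_keys_dir = os.path.join(base_dir, 'private_keys')
    for directory in (keys_dir, public_keys_dir, secret_keys_dir):
        os.makedirs(directory, exist_ok=True)
    for name in ('server', 'client'):
        # create_certificates writes <name>.key and <name>.key_secret into keys_dir
        public_file, secret_file = zmq.auth.create_certificates(keys_dir, name)
        os.replace(public_file, os.path.join(public_keys_dir, os.path.basename(public_file)))
        os.replace(secret_file, os.path.join(secret_keys_dir, os.path.basename(secret_file)))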
async def METHOD_NAME() -> None:
'''Run Ironhouse example'''
# These directories are generated by the generate_certificates script
base_dir = os.path.dirname(__file__)
keys_dir = os.path.join(base_dir, 'certificates')
public_keys_dir = os.path.join(base_dir, 'public_keys')
secret_keys_dir = os.path.join(base_dir, 'private_keys')
if not (
os.path.exists(keys_dir)
and os.path.exists(public_keys_dir)
and os.path.exists(secret_keys_dir)
):
logging.critical(
"Certificates are missing - run generate_certificates script first"
)
sys.exit(1)
# Start an authenticator for this context.
auth = AsyncioAuthenticator()
auth.allow('127.0.0.1')
# Tell authenticator to use the certificate in a directory
auth.configure_curve(domain='*', location=public_keys_dir)
server_secret_file = os.path.join(secret_keys_dir, "server.key_secret")
server = setup_server(server_secret_file)
server_public_file = os.path.join(public_keys_dir, "server.key")
client_secret_file = os.path.join(secret_keys_dir, "client.key_secret")
client = setup_client(client_secret_file, server_public_file)
client.send(b'Hello')
auth.start()
if __name__ == '__main__':
if zmq.zmq_version_info() < (4, 0):
raise RuntimeError(
"Security is not supported in libzmq version < 4.0. libzmq version {}".format(
zmq.zmq_version()
)
)
if '-v' in sys.argv:
level = logging.DEBUG
else:
level = logging.INFO
logging.basicConfig(level=level, format="[%(levelname)s] %(message)s")
loop = asyncio.new_event_loop()
loop.create_task(METHOD_NAME())
loop.run_forever() |
299,827 | provisioning state | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetVirtualWANResult',
'AwaitableGetVirtualWANResult',
'get_virtual_wan',
'get_virtual_wan_output',
]
@pulumi.output_type
class GetVirtualWANResult:
"""
VirtualWAN Resource.
"""
def __init__(__self__, disable_vpn_encryption=None, etag=None, id=None, location=None, name=None, METHOD_NAME=None, tags=None, type=None, virtual_hubs=None, vpn_sites=None):
if disable_vpn_encryption and not isinstance(disable_vpn_encryption, bool):
raise TypeError("Expected argument 'disable_vpn_encryption' to be a bool")
pulumi.set(__self__, "disable_vpn_encryption", disable_vpn_encryption)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", METHOD_NAME)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if virtual_hubs and not isinstance(virtual_hubs, list):
raise TypeError("Expected argument 'virtual_hubs' to be a list")
pulumi.set(__self__, "virtual_hubs", virtual_hubs)
if vpn_sites and not isinstance(vpn_sites, list):
raise TypeError("Expected argument 'vpn_sites' to be a list")
pulumi.set(__self__, "vpn_sites", vpn_sites)
@property
@pulumi.getter(name="disableVpnEncryption")
def disable_vpn_encryption(self) -> Optional[bool]:
"""
Vpn encryption to be disabled or not.
"""
return pulumi.get(self, "disable_vpn_encryption")
@property
@pulumi.getter
def etag(self) -> str:
"""
Gets a unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def location(self) -> str:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def METHOD_NAME(self) -> str:
"""
The provisioning state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualHubs")
def virtual_hubs(self) -> Sequence['outputs.SubResourceResponse']:
"""
List of VirtualHubs in the VirtualWAN.
"""
return pulumi.get(self, "virtual_hubs")
@property
@pulumi.getter(name="vpnSites")
def vpn_sites(self) -> Sequence['outputs.SubResourceResponse']:
return pulumi.get(self, "vpn_sites")
class AwaitableGetVirtualWANResult(GetVirtualWANResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVirtualWANResult(
disable_vpn_encryption=self.disable_vpn_encryption,
etag=self.etag,
id=self.id,
location=self.location,
name=self.name,
METHOD_NAME=self.METHOD_NAME,
tags=self.tags,
type=self.type,
virtual_hubs=self.virtual_hubs,
vpn_sites=self.vpn_sites)
def get_virtual_wan(resource_group_name: Optional[str] = None,
virtual_wan_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualWANResult:
"""
Retrieves the details of a VirtualWAN.
:param str resource_group_name: The resource group name of the VirtualWan.
:param str virtual_wan_name: The name of the VirtualWAN being retrieved.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['virtualWANName'] = virtual_wan_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:network/v20180701:getVirtualWAN', __args__, opts=opts, typ=GetVirtualWANResult).value
return AwaitableGetVirtualWANResult(
disable_vpn_encryption=pulumi.get(__ret__, 'disable_vpn_encryption'),
etag=pulumi.get(__ret__, 'etag'),
id=pulumi.get(__ret__, 'id'),
location=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
METHOD_NAME=pulumi.get(__ret__, 'provisioning_state'),
tags=pulumi.get(__ret__, 'tags'),
type=pulumi.get(__ret__, 'type'),
virtual_hubs=pulumi.get(__ret__, 'virtual_hubs'),
vpn_sites=pulumi.get(__ret__, 'vpn_sites'))
@_utilities.lift_output_func(get_virtual_wan)
def get_virtual_wan_output(resource_group_name: Optional[pulumi.Input[str]] = None,
virtual_wan_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetVirtualWANResult]:
"""
Retrieves the details of a VirtualWAN.
:param str resource_group_name: The resource group name of the VirtualWan.
:param str virtual_wan_name: The name of the VirtualWAN being retrieved.
"""
... |
299,828 | test duplicate params | # --------------------------------------------------------------------------
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the ""Software""), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# --------------------------------------------------------------------------
from pagingversiontolerant import AutoRestPagingTestService
from custombaseurlpagingversiontolerant import AutoRestParameterizedHostTestPagingClient
from azure.core.exceptions import HttpResponseError
import pytest
@pytest.fixture
def client():
with AutoRestPagingTestService() as client:
yield client
@pytest.fixture
def custom_url_client():
with AutoRestParameterizedHostTestPagingClient(host="host:3000") as client:
yield client
def test_get_no_item_name_pages(client):
pages = client.paging.get_no_item_name_pages()
items = [i for i in pages]
assert len(items) == 1
assert items[0]["properties"]["id"] == 1
assert items[0]["properties"]["name"] == "Product"
def test_get_null_next_link_name_pages(client):
pages = client.paging.get_null_next_link_name_pages()
items = [i for i in pages]
assert len(items) == 1
assert items[0]["properties"]["id"] == 1
assert items[0]["properties"]["name"] == "Product"
def test_get_empty_next_link_name_pages(client):
pages = client.paging.get_empty_next_link_name_pages()
items = [i for i in pages]
assert len(items) == 1
assert items[0]["properties"]["id"] == 1
assert items[0]["properties"]["name"] == "Product"
def test_get_single_pages_with_cb(client):
def cb(list_of_obj):
for obj in list_of_obj:
obj["marked"] = True
return list_of_obj
pages = client.paging.get_single_pages(cls=cb)
assert all(obj["marked"] for obj in pages)
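# The `cls` hook above is called once per page with the list of deserialized
# items, and whatever it returns is what the pager yields. A hedged sketch of
# using it to project a single field (the helper name is made up):
#   def keep_ids(objs):
#       return [o["properties"]["id"] for o in objs]
#   ids = list(client.paging.get_single_pages(cls=keep_ids))  # -> [1] against this test server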
def test_get_single_pages(client):
pages = client.paging.get_single_pages()
items = [i for i in pages]
assert len(items) == 1
assert items[0]["properties"]["id"] == 1
assert items[0]["properties"]["name"] == "Product"
def test_get_single_pages_with_body_params(client):
pages = client.paging.get_single_pages_with_body_params({"name": "body"})
items = [i for i in pages]
assert len(items) == 1
assert items[0]["properties"]["id"] == 1
assert items[0]["properties"]["name"] == "Product"
def test_get_multiple_pages(client):
pages = client.paging.get_multiple_pages()
items = [i for i in pages]
assert len(items) == 10
def test_query_params(client):
pages = client.paging.get_with_query_params(required_query_parameter='100')
items = [i for i in pages]
assert len(items) == 2
def test_get_odata_multiple_pages(client):
pages = client.paging.get_odata_multiple_pages()
items = [i for i in pages]
assert len(items) == 10
def test_get_multiple_pages_retry_first(client):
pages = client.paging.get_multiple_pages_retry_first()
items = [i for i in pages]
assert len(items) == 10
def test_get_multiple_pages_retry_second(client):
pages = client.paging.get_multiple_pages_retry_second()
items = [i for i in pages]
assert len(items) == 10
def test_get_multiple_pages_with_offset(client):
pages = client.paging.get_multiple_pages_with_offset(offset=100)
items = [i for i in pages]
assert len(items) == 10
assert items[-1]["properties"]["id"] == 110
def test_get_single_pages_failure(client):
pages = client.paging.get_single_pages_failure()
with pytest.raises(HttpResponseError):
list(pages)
def test_get_multiple_pages_failure(client):
pages = client.paging.get_multiple_pages_failure()
with pytest.raises(HttpResponseError):
list(pages)
def test_get_multiple_pages_failure_uri(client):
pages = client.paging.get_multiple_pages_failure_uri()
with pytest.raises(HttpResponseError):
list(pages)
def test_paging_fragment_path(client):
pages = client.paging.get_multiple_pages_fragment_next_link(api_version="1.6", tenant="test_user")
items = [i for i in pages]
assert len(items) == 10
with pytest.raises(AttributeError):
# Be sure this method is not generated (Transform work)
client.paging.get_multiple_pages_fragment_next_link_next() # pylint: disable=E1101
def test_custom_url_get_pages_partial_url(custom_url_client):
paged = list(custom_url_client.paging.get_pages_partial_url("local"))
assert len(paged) == 2
assert paged[0]["properties"]["id"] == 1
assert paged[1]["properties"]["id"] == 2
def test_custom_url_get_pages_partial_url_operation(custom_url_client):
paged = list(custom_url_client.paging.get_pages_partial_url_operation("local"))
assert len(paged) == 2
assert paged[0]["properties"]["id"] == 1
assert paged[1]["properties"]["id"] == 2
def test_get_multiple_pages_lro(client):
"""LRO + Paging at the same time.
"""
from azure.mgmt.core.polling.arm_polling import ARMPolling
poller = client.paging.begin_get_multiple_pages_lro(polling=ARMPolling(timeout=0, request_id="test"))
pager = poller.result()
items = list(pager)
assert len(items) == 10
assert items[0]["properties"]["id"] == 1
assert items[1]["properties"]["id"] == 2
def test_item_name_with_xms_client_name(client):
pages = client.paging.get_paging_model_with_item_name_with_xms_client_name()
items = [i for i in pages]
assert len(items) == 1
def test_initial_response_no_items(client):
pages = client.paging.first_response_empty()
items = [i for i in pages]
assert len(items) == 1
def METHOD_NAME(client):
pages = list(client.paging.duplicate_params(filter="foo"))
assert len(pages) == 1
assert pages[0]["properties"]["id"] == 1
assert pages[0]["properties"]["name"] == "Product"
def test_dont_send_maxpagesize(client):
list(client.paging.page_with_max_page_size())
def test_append_api_version(client):
pages = list(client.paging.append_api_version())
assert len(pages) == 1
assert pages[0]["properties"]["id"] == 1
assert pages[0]["properties"]["name"] == "Product"
def test_replace_api_version(client):
pages = list(client.paging.replace_api_version())
assert len(pages) == 1
assert pages[0]["properties"]["id"] == 1
assert pages[0]["properties"]["name"] == "Product" |
299,829 | test viz relation | import unittest
import tests.secrets as sct
from nlu import *
class TestViz(unittest.TestCase):
def test_quick(self):
# TODO special Resolver RULE :
# Resolvers require DOC TYPE input!! But this Doc Type Input should come from a NER_CHUNK!! So we must convert NER_CHUNK with Chunk2Doc.
        # But then we must also make sure that the Sentence Embeddings for the Resolver are generated from the Doc2Chunk Col!!! Argh
        # But lol JK this annotator really just takes sentence Embeds.... Each SentenceEmbed should originate from a NER that was converted with Chunk2Doc
nlu_ref = "med_ner.jsl.wip.clinical en.resolve.icd10cm.augmented"
data = "This is an 82 - year-old male with a history of prior tobacco use , hypertension , chronic renal insufficiency , COPD , gastritis , and TIA who initially presented to Braintree with a non-ST elevation MI and Guaiac positive stools , transferred to St . Margaret's Center for Women & Infants for cardiac catheterization with PTCA to mid LAD lesion complicated by hypotension and bradycardia requiring Atropine , IV fluids and transient dopamine possibly secondary to vagal reaction , subsequently transferred to CCU for close monitoring , hemodynamically stable at the time of admission to the CCU ."
SPARK_NLP_LICENSE = sct.SPARK_NLP_LICENSE
AWS_ACCESS_KEY_ID = sct.AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY = sct.AWS_SECRET_ACCESS_KEY
JSL_SECRET = sct.JSL_SECRET
nlu.auth(
SPARK_NLP_LICENSE, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, JSL_SECRET
)
pipe = nlu.load(nlu_ref, verbose=True)
df = pipe.predict(data)
for c in df.columns:
print(df[c])
def test_viz_ner(self):
pipe = nlu.load("ner.conll", verbose=True)
data = "Donald Trump and Angela Merkel from Germany don't share many oppinions!"
"""If there are multiple components we chould VIZ from, we need to define what takes prescedence"""
viz = pipe.viz(data, viz_type="ner")
print(viz)
print(viz)
print(viz)
def test_viz_dep(self):
pipe = nlu.load("dep.typed", verbose=True)
data = "Donald Trump and Angela Merkel from Germany don't share many oppinions!"
viz = pipe.viz(data, viz_type="dep")
print(viz)
print(viz)
print(viz)
# Deprecated
# def test_viz_resolution_chunk(self):
# nlu_ref = 'en.resolve_chunk.icd10cm.neoplasms'
# data = """The patient is a 5-month-old infant who presented initially on Monday with a cold, cough, and runny nose for 2 days. Mom states she had no fever. Her appetite was good but she was spitting up a lot. She had no difficulty breathing and her cough was described as dry and hacky. At that time, physical exam showed a right TM, which was red. Left TM was okay. She was fairly congested but looked happy and playful. She was started on Amoxil and Aldex and we told to recheck in 2 weeks to recheck her ear. Mom returned to clinic again today because she got much worse overnight. She was having difficulty breathing. She was much more congested and her appetite had decreased significantly today. She also spiked a temperature yesterday of 102.6 and always having trouble sleeping secondary to congestion."""
#
# SPARK_NLP_LICENSE = sct.SPARK_NLP_LICENSE
# AWS_ACCESS_KEY_ID = sct.AWS_ACCESS_KEY_ID
# AWS_SECRET_ACCESS_KEY = sct.AWS_SECRET_ACCESS_KEY
# JSL_SECRET = sct.JSL_SECRET
#
# nlu.auth(SPARK_NLP_LICENSE,AWS_ACCESS_KEY_ID,AWS_SECRET_ACCESS_KEY,JSL_SECRET)
# pipe = nlu.load(nlu_ref,verbose=True)
# viz = pipe.viz(data,viz_type='resolution')
# print(viz)
#
def test_viz_resolution_sentence(self):
nlu_ref = "med_ner.jsl.wip.clinical en.resolve.icd10cm.augmented"
data = "This is an 82 - year-old male with a history of prior tobacco use , hypertension , chronic renal insufficiency , COPD , gastritis , and TIA who initially presented to Braintree with a non-ST elevation MI and Guaiac positive stools , transferred to St . Margaret's Center for Women & Infants for cardiac catheterization with PTCA to mid LAD lesion complicated by hypotension and bradycardia requiring Atropine , IV fluids and transient dopamine possibly secondary to vagal reaction , subsequently transferred to CCU for close monitoring , hemodynamically stable at the time of admission to the CCU ."
SPARK_NLP_LICENSE = sct.SPARK_NLP_LICENSE
AWS_ACCESS_KEY_ID = sct.AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY = sct.AWS_SECRET_ACCESS_KEY
JSL_SECRET = sct.JSL_SECRET
nlu.auth(
SPARK_NLP_LICENSE, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, JSL_SECRET
)
pipe = nlu.load(nlu_ref, verbose=True)
viz = pipe.viz(data, viz_type="resolution")
print(viz)
def METHOD_NAME(self):
nlu_ref = "med_ner.jsl.wip.clinical relation.temporal_events"
data = "He was advised chest X-ray or CT scan after checking his SpO2 which was <= 93%"
SPARK_NLP_LICENSE = sct.SPARK_NLP_LICENSE
AWS_ACCESS_KEY_ID = sct.AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY = sct.AWS_SECRET_ACCESS_KEY
JSL_SECRET = sct.JSL_SECRET
nlu.auth(
SPARK_NLP_LICENSE, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, JSL_SECRET
)
pipe = nlu.load(nlu_ref, verbose=True)
viz = pipe.viz(data, viz_type="relation")
print(viz)
def test_viz_assertion(self):
nlu_ref = "med_ner.jsl.wip.clinical assert"
data = "The patient was tested for cancer, but none was detected, he is free of cancer."
SPARK_NLP_LICENSE = sct.SPARK_NLP_LICENSE
AWS_ACCESS_KEY_ID = sct.AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY = sct.AWS_SECRET_ACCESS_KEY
JSL_SECRET = sct.JSL_SECRET
nlu.auth(
SPARK_NLP_LICENSE, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, JSL_SECRET
)
pipe = nlu.load(nlu_ref, verbose=True)
viz = pipe.viz(data, viz_type="assert")
print(viz)
def test_infer_viz_type(self):
SPARK_NLP_LICENSE = sct.SPARK_NLP_LICENSE
AWS_ACCESS_KEY_ID = sct.AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY = sct.AWS_SECRET_ACCESS_KEY
JSL_SECRET = sct.JSL_SECRET
nlu.auth(
SPARK_NLP_LICENSE, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, JSL_SECRET
)
nlu_ref = "med_ner.jsl.wip.clinical assert"
data = "The patient was tested for cancer, but none was detected, he is free of cancer."
pipe = nlu.load(nlu_ref)
pipe.viz(data)
if __name__ == "__main__":
unittest.main() |
299,830 | location | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetBackupVaultResult',
'AwaitableGetBackupVaultResult',
'get_backup_vault',
'get_backup_vault_output',
]
@pulumi.output_type
class GetBackupVaultResult:
"""
Backup Vault Resource
"""
def __init__(__self__, e_tag=None, id=None, identity=None, METHOD_NAME=None, name=None, properties=None, system_data=None, tags=None, type=None):
if e_tag and not isinstance(e_tag, str):
raise TypeError("Expected argument 'e_tag' to be a str")
pulumi.set(__self__, "e_tag", e_tag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", METHOD_NAME)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="eTag")
def e_tag(self) -> Optional[str]:
"""
Optional ETag.
"""
return pulumi.get(self, "e_tag")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id represents the complete path to the resource.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.DppIdentityDetailsResponse']:
"""
Input Managed Identity Details
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def METHOD_NAME(self) -> str:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name associated with the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> 'outputs.BackupVaultResponse':
"""
BackupVaultResource properties
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Metadata pertaining to creation and last modification of the resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type represents the complete path of the form Namespace/ResourceType/ResourceType/...
"""
return pulumi.get(self, "type")
class AwaitableGetBackupVaultResult(GetBackupVaultResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetBackupVaultResult(
e_tag=self.e_tag,
id=self.id,
identity=self.identity,
METHOD_NAME=self.METHOD_NAME,
name=self.name,
properties=self.properties,
system_data=self.system_data,
tags=self.tags,
type=self.type)
def get_backup_vault(resource_group_name: Optional[str] = None,
vault_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetBackupVaultResult:
"""
Returns a resource belonging to a resource group.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str vault_name: The name of the backup vault.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['vaultName'] = vault_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:dataprotection/v20230401preview:getBackupVault', __args__, opts=opts, typ=GetBackupVaultResult).value
return AwaitableGetBackupVaultResult(
e_tag=pulumi.get(__ret__, 'e_tag'),
id=pulumi.get(__ret__, 'id'),
identity=pulumi.get(__ret__, 'identity'),
METHOD_NAME=pulumi.get(__ret__, 'location'),
name=pulumi.get(__ret__, 'name'),
properties=pulumi.get(__ret__, 'properties'),
system_data=pulumi.get(__ret__, 'system_data'),
tags=pulumi.get(__ret__, 'tags'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_backup_vault)
def get_backup_vault_output(resource_group_name: Optional[pulumi.Input[str]] = None,
vault_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetBackupVaultResult]:
"""
Returns a resource belonging to a resource group.
:param str resource_group_name: The name of the resource group. The name is case insensitive.
:param str vault_name: The name of the backup vault.
"""
... |
299,831 | set up | from array import array
from tests.QtTestCase import QtTestCase
from urh.controller.dialogs.SimulatorDialog import SimulatorDialog
from urh.dev.BackendHandler import BackendContainer, Backends
from urh.signalprocessing.Participant import Participant
from urh.simulator.SimulatorMessage import SimulatorMessage
class TestSimulatorDialog(QtTestCase):
def METHOD_NAME(self):
super().METHOD_NAME()
alice = Participant("Alice", "A")
bob = Participant("Bob", "B")
alice.simulate = True
bob.simulate = True
self.form.project_manager.participants.append(alice)
self.form.project_manager.participants.append(bob)
self.form.project_manager.project_updated.emit()
mt = self.form.compare_frame_controller.proto_analyzer.default_message_type
msg1 = SimulatorMessage(source=bob, destination=alice, plain_bits=array("B", [1, 0, 1, 1]), pause=100, message_type=mt)
msg2 = SimulatorMessage(source=alice, destination=bob, plain_bits=array("B", [1, 0, 1, 1]), pause=100, message_type=mt)
simulator_manager = self.form.simulator_tab_controller.simulator_config
simulator_manager.add_items([msg1, msg2], 0, simulator_manager.rootItem)
simulator_manager.add_label(5, 15, "test", parent_item=simulator_manager.rootItem.children[0])
self.dialog = SimulatorDialog(self.form.simulator_tab_controller.simulator_config,
self.form.generator_tab_controller.modulators,
self.form.simulator_tab_controller.sim_expression_parser,
self.form.project_manager)
if self.SHOW:
self.dialog.show()
def test_set_rx_parameters(self):
rx_settings_widget = self.dialog.device_settings_rx_widget
bh = BackendContainer("test", {Backends.native}, True, True)
self.assertTrue(bh.is_enabled)
rx_settings_widget.backend_handler.device_backends["test"] = bh
rx_settings_widget.ui.cbDevice.addItem("test")
rx_settings_widget.ui.cbDevice.setCurrentText("test")
self.assertEqual(rx_settings_widget.device.name, "test")
self.assertEqual(rx_settings_widget.device.backend, Backends.native)
simulator = self.dialog.simulator
self.__edit_spinbox_value(rx_settings_widget.ui.spinBoxFreq, 500e6)
self.assertEqual(simulator.sniffer.rcv_device.frequency, 500e6)
self.__edit_spinbox_value(rx_settings_widget.ui.spinBoxSampleRate, 4e6)
self.assertEqual(simulator.sniffer.rcv_device.sample_rate, 4e6)
self.__edit_spinbox_value(rx_settings_widget.ui.spinBoxBandwidth, 5e6)
self.assertEqual(simulator.sniffer.rcv_device.bandwidth, 5e6)
self.__edit_spinbox_value(rx_settings_widget.ui.spinBoxGain, 15)
self.assertEqual(simulator.sniffer.rcv_device.gain, 15)
self.__edit_spinbox_value(rx_settings_widget.ui.spinBoxIFGain, 10)
self.assertEqual(simulator.sniffer.rcv_device.if_gain, 10)
self.__edit_spinbox_value(rx_settings_widget.ui.spinBoxBasebandGain, 11)
self.assertEqual(simulator.sniffer.rcv_device.baseband_gain, 11)
self.__edit_spinbox_value(rx_settings_widget.ui.spinBoxFreqCorrection, 22)
self.assertEqual(simulator.sniffer.rcv_device.freq_correction, 22)
rx_settings_widget.ui.lineEditIP.setText("4.4.4.4")
rx_settings_widget.ui.lineEditIP.editingFinished.emit()
self.assertEqual(simulator.sniffer.rcv_device.ip, "4.4.4.4")
def test_set_sniff_parameters(self):
sniff_settings_widget = self.dialog.sniff_settings_widget
simulator = self.dialog.simulator
self.__edit_spinbox_value(sniff_settings_widget.ui.spinbox_sniff_SamplesPerSymbol, 111)
self.assertEqual(simulator.sniffer.signal.samples_per_symbol, 111)
self.__edit_spinbox_value(sniff_settings_widget.ui.spinbox_sniff_Center, 0.1337)
self.assertEqual(simulator.sniffer.signal.center, 0.1337)
self.__edit_spinbox_value(sniff_settings_widget.ui.spinBoxCenterSpacing, 0.4)
self.assertEqual(simulator.sniffer.signal.center_spacing, 0.4)
self.__edit_spinbox_value(sniff_settings_widget.ui.spinbox_sniff_ErrorTolerance, 13)
self.assertEqual(simulator.sniffer.signal.tolerance, 13)
self.__edit_spinbox_value(sniff_settings_widget.ui.spinbox_sniff_Noise, 0.1234)
self.assertEqual(simulator.sniffer.signal.noise_threshold_relative, 0.1234)
sniff_settings_widget.ui.combox_sniff_Modulation.setCurrentText("PSK")
self.assertEqual(simulator.sniffer.signal.modulation_type, "PSK")
self.__edit_spinbox_value(sniff_settings_widget.ui.spinBoxBitsPerSymbol, 5)
self.assertEqual(simulator.sniffer.signal.bits_per_symbol, 5)
decodings = [sniff_settings_widget.ui.comboBox_sniff_encoding.itemText(i) for i in
range(sniff_settings_widget.ui.comboBox_sniff_encoding.count())]
sniff_settings_widget.ui.comboBox_sniff_encoding.setCurrentIndex(2)
self.assertEqual(simulator.sniffer.decoder.name, decodings[2])
def test_set_tx_parameters(self):
tx_settings_widget = self.dialog.device_settings_tx_widget
simulator = self.dialog.simulator
bh = BackendContainer("test", {Backends.native}, True, True)
self.assertTrue(bh.is_enabled)
tx_settings_widget.backend_handler.device_backends["test"] = bh
tx_settings_widget.ui.cbDevice.addItem("test")
tx_settings_widget.ui.cbDevice.setCurrentText("test")
self.assertEqual(tx_settings_widget.device.name, "test")
self.assertEqual(tx_settings_widget.device.backend, Backends.native)
self.__edit_spinbox_value(tx_settings_widget.ui.spinBoxFreq, 300e6)
self.assertEqual(simulator.sender.device.frequency, 300e6)
self.__edit_spinbox_value(tx_settings_widget.ui.spinBoxSampleRate, 5e6)
self.assertEqual(simulator.sender.device.sample_rate, 5e6)
self.__edit_spinbox_value(tx_settings_widget.ui.spinBoxBandwidth, 3e6)
self.assertEqual(simulator.sender.device.bandwidth, 3e6)
self.__edit_spinbox_value(tx_settings_widget.ui.spinBoxGain, 16)
self.assertEqual(simulator.sender.device.gain, 16)
self.__edit_spinbox_value(tx_settings_widget.ui.spinBoxIFGain, 13)
self.assertEqual(simulator.sender.device.if_gain, 13)
self.__edit_spinbox_value(tx_settings_widget.ui.spinBoxBasebandGain, 10)
self.assertEqual(simulator.sender.device.baseband_gain, 10)
self.__edit_spinbox_value(tx_settings_widget.ui.spinBoxFreqCorrection, 33)
self.assertEqual(simulator.sender.device.freq_correction, 33)
tx_settings_widget.ui.lineEditIP.setText("1.2.6.2")
tx_settings_widget.ui.lineEditIP.editingFinished.emit()
self.assertEqual(simulator.sender.device.ip, "1.2.6.2")
def __edit_spinbox_value(self, spinbox, value):
spinbox.setValue(value)
spinbox.editingFinished.emit() |
299,832 | find matches | from torch.fx import (
GraphModule,
Node,
map_arg
)
from torch.fx.graph import Graph
from .match_utils import (
_is_match,
MatchAllNode,
)
from .pattern_utils import (
_sorted_patterns_dict,
)
from ..backend_config import (
BackendConfig,
get_native_backend_config,
)
from ..backend_config.utils import (
get_fuser_method_mapping,
get_fusion_pattern_to_root_node_getter,
get_fusion_pattern_to_extra_inputs_getter,
)
from .custom_config import FuseCustomConfig
from .fuse_handler import (
_get_fusion_pattern_to_fuse_handler_cls,
FuseHandler,
)
from typing import Any, Callable, Dict, List, Tuple, Union
import warnings
from torch.ao.quantization.utils import Pattern, NodePattern
__all__ = [
"fuse",
# TODO: We should make this private in the future
# This is currently needed for test_public_bindings for some reason
"FuseHandler",
]
def fuse(
model: GraphModule,
is_qat: bool,
fuse_custom_config: Union[FuseCustomConfig, Dict[str, Any], None] = None,
backend_config: Union[BackendConfig, Dict[str, Any], None] = None,
) -> GraphModule:
if fuse_custom_config is None:
fuse_custom_config = FuseCustomConfig()
if isinstance(fuse_custom_config, Dict):
warnings.warn(
"Passing a fuse_custom_config_dict to fuse is deprecated and will not be supported "
"in a future version. Please pass in a FuseCustomConfig instead.")
fuse_custom_config = FuseCustomConfig.from_dict(fuse_custom_config)
if isinstance(backend_config, Dict):
warnings.warn(
"Passing a backend_config_dict to prepare is deprecated and will not be supported "
"in a future version. Please pass in a BackendConfig instead.")
backend_config = BackendConfig.from_dict(backend_config)
named_modules = dict(model.named_modules())
if backend_config is None:
backend_config = get_native_backend_config()
fusion_pattern_to_fuse_handler_cls = _sorted_patterns_dict(_get_fusion_pattern_to_fuse_handler_cls(backend_config))
fuser_method_mapping = get_fuser_method_mapping(backend_config)
fusion_pattern_to_root_node_getter = get_fusion_pattern_to_root_node_getter(backend_config)
fusion_pattern_to_extra_inputs_getter = get_fusion_pattern_to_extra_inputs_getter(backend_config)
# find fusion
fusion_pairs = METHOD_NAME(
model, model.graph, fusion_pattern_to_fuse_handler_cls)
# TODO: change this to inplace changes to graph, since we no longer construct
# new GraphModule anymore
fused_graph = Graph()
env: Dict[Any, Any] = {}
def load_arg(a):
return map_arg(a, lambda node: env[node.name])
def default_root_node_getter(node_pattern):
while not isinstance(node_pattern[-1], Node):
node_pattern = node_pattern[-1]
return node_pattern[-1]
for node in model.graph.nodes:
maybe_last_node, pattern, matched_node_pattern, obj, node_to_subpattern = \
fusion_pairs.get(node.name, (None, None, None, None, None))
# get the corresponding subpattern for the current node
if node_to_subpattern is not None:
node_subpattern = node_to_subpattern.get(node, None)
else:
node_subpattern = None
if maybe_last_node is node:
assert obj is not None
root_node_getter = fusion_pattern_to_root_node_getter.get(pattern, default_root_node_getter)
root_node = root_node_getter(matched_node_pattern) # type: ignore[index]
extra_inputs_getter = fusion_pattern_to_extra_inputs_getter.get(pattern, None)
extra_inputs = []
if extra_inputs_getter is not None:
extra_inputs = extra_inputs_getter(matched_node_pattern)
# TODO: add validation that root_node is a module and has the same type
# as the root_module in the configuration
env[node.name] = obj.fuse(
load_arg, named_modules, fused_graph, root_node, extra_inputs, matched_node_pattern, # type: ignore[arg-type]
fuse_custom_config, fuser_method_mapping, is_qat)
elif maybe_last_node is None or node_subpattern is MatchAllNode:
env[node.name] = fused_graph.node_copy(node, load_arg)
# node matched in patterns and is not root is removed here
model = GraphModule(model, fused_graph)
return model
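# Note on pattern layout (this mirrors the convention used by _is_match):
# fusion patterns are written outermost op first, so conv -> bn -> relu is
# expressed as (nn.ReLU, (nn.BatchNorm2d, nn.Conv2d)) and is matched starting
# from the last node of the fused sequence. A hedged sketch of a root-node
# getter for that nesting (the function name is illustrative only):
#   def conv_bn_relu_root_getter(matched_node_pattern):
#       relu, (bn, conv) = matched_node_pattern
#       return conv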
def METHOD_NAME(
root: GraphModule,
graph: Graph,
pattern_to_fuse_handler_cls: Dict[Pattern, Callable],
) -> Dict[str, Tuple[Node, Pattern, NodePattern, FuseHandler, Dict[Node, Any]]]:
modules = dict(root.named_modules())
# node name -> (root_node, match_value)
match_map : Dict[
str, Tuple[Node, Pattern, NodePattern, FuseHandler, Dict[Node, Any]]] = {}
# a map from node to the matched subpattern
node_to_subpattern: Dict[Node, Any] = {}
# TODO: dedup with quantization matching function in match_utils.py
def apply_match(pattern, node, match, matched_node_pattern, node_to_subpattern):
if isinstance(pattern, tuple):
s, *args = pattern
current_node_pattern: List[Node] = []
apply_match(s, node, match, current_node_pattern, node_to_subpattern)
for subpattern, arg in zip(args, node.args):
apply_match(subpattern, arg, match, current_node_pattern, node_to_subpattern)
matched_node_pattern.append(tuple(current_node_pattern))
else:
# the first pattern matches will take precedence
if node.name not in match_map:
matched_node_pattern.append(node)
# MatchAllNode here is actually MatchAllInputNode which should not
# be added to match_map
if pattern is not MatchAllNode:
node_to_subpattern[node] = pattern
root_node, pattern, handler = match
match_map[node.name] = (root_node, pattern, matched_node_pattern, handler, node_to_subpattern)
for node in reversed(graph.nodes):
if node.name not in match_map:
for pattern, fuse_handler_cls in pattern_to_fuse_handler_cls.items():
matched_node_pattern: List[Node] = []
if _is_match(modules, node, pattern):
apply_match(pattern, node, (node, pattern, fuse_handler_cls(node)), matched_node_pattern, node_to_subpattern)
break
return match_map |
299,833 | cli | #!/usr/bin/env python
import time
import click
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
@click.group()
def METHOD_NAME():
"""This script checks whether data tables on hepdata.net can be loaded
    correctly. It uses Selenium WebDriver with Chrome; please ensure you have
installed Chrome and ChromeDriver before running this script.
"""
@METHOD_NAME.command()
@click.option('--start-page', '-s', default=1,
help='The first page to check')
@click.option('--end-page', '-e', default=1,
help='The last page to check (must be >= start-page)')
@click.option('--max-tables', '-m', default=5,
help='Maximum number of data tables to check (-1 for all)')
@click.option('--username', '-u',
help='HEPData username (email address) to log in '
'to increase rate limits')
def check_by_page(start_page, end_page, max_tables, username):
"""Checks specified pages of search results (in the default order,
i.e. latest first), clicks through to each submission, and loads each of
    the first max_tables data tables in turn."""
if end_page < start_page:
click.echo("end-page must be greater than or equal to start-page")
exit(1)
click.echo("Checking from page %s to %s of search results"
% (start_page, end_page))
driver = _get_driver(username)
for page in range(int(start_page), int(end_page) + 1):
click.echo("Checking page %s of search results" % page)
driver.get('https://www.hepdata.net/search/?page=%s' % page)
record_links = driver.find_elements(By.CSS_SELECTOR,
'.search-result-item .record-header a'
)
urls = []
for link in record_links:
urls.append(link.get_attribute('href'))
for url in urls:
_check_url(url, 1, max_tables, driver)
@METHOD_NAME.command()
@click.option('--record-url', '-r', type=str,
help='Record URL to check for data tables')
@click.option('--start-table', '-s', default=1,
help='The first table to check')
@click.option('--end-table', '-e', default=1,
help='The last table to check (must be >= start-table)')
@click.option('--username', '-u',
help='HEPData username (email address) to log in '
'to increase rate limits')
def check_url(record_url, start_table, end_table, username):
"""Checks the given URL and loads each data table from start_table
to end_table. It is recommended to check no more than around 40 tables
at once."""
if end_table < start_table:
click.echo("end-table must be greater than or equal to start-table")
exit(1)
driver = _get_driver(username)
_check_url(record_url, start_table, end_table, driver)
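# Hypothetical invocations (the script filename is an assumption; click exposes
# the commands with dashes):
#   python check_data_tables.py check-by-page -s 1 -e 2 -m 5
#   python check_data_tables.py check-url -r <record-url> -s 1 -e 10 -u user@example.com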
def _check_url(url, start_table, end_table, driver):
click.echo("Checking URL %s" % url)
driver.get(url)
table_links = driver.find_elements(By.CSS_SELECTOR, '#table-list li')
table_links = table_links[start_table-1:end_table]
for i, table_link in enumerate(table_links):
click.echo("Checking table *%s*"
% table_link.text.split('\n')[0])
actions = ActionChains(driver)
actions.move_to_element(table_link).perform()
table_link.click()
try:
wait = WebDriverWait(driver, 20)
wait.until(EC.visibility_of_element_located(
(By.ID, "hepdata_table_content"))
)
click.echo("Loaded")
except TimeoutException:
click.echo("***** Missing table at %s: *****" % url)
for el in driver.find_elements(By.ID, "hepdata_table_loading_failed"):
click.echo("***** %s *****" % el.text)
def _get_driver(username=None):
password = None
if username:
password = click.prompt("Enter password for user %s" % username,
hide_input=True)
options = webdriver.ChromeOptions()
options.add_argument("--headless")
driver = webdriver.Chrome(options=options,
executable_path=r'/usr/local/bin/chromedriver')
driver.set_window_size(1120, 550)
driver.get('https://www.hepdata.net/search/')
wait = WebDriverWait(driver, 10)
wait.until(EC.visibility_of_element_located(
(By.CSS_SELECTOR, ".cc_btn_accept_all")
))
time.sleep(1)
cookie_accept_btn = driver.find_element(By.CSS_SELECTOR, ".cc_btn_accept_all")
cookie_accept_btn.click()
if username and password:
click.echo("Logging in...")
driver.get('https://www.hepdata.net/login/')
login_form = driver.find_element(By.NAME, 'login_user_form')
login_form.find_element(By.NAME, 'email').send_keys(username)
login_form.find_element(By.NAME, 'password').send_keys(password)
login_form.submit()
return driver
if __name__ == '__main__':
METHOD_NAME() |
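# Example invocations (hypothetical module name check_data_tables.py; command
# names assume click >= 7, which maps underscores in function names to dashes;
# the record URL below is a placeholder):
#   python check_data_tables.py check-by-page --start-page 1 --end-page 2 --max-tables 5
#   python check_data_tables.py check-url -r https://www.hepdata.net/record/ins1234567 -s 1 -e 10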
299,834 | guess other chan axis | # Authors: George O'Neill <g.o'neill@ucl.ac.uk>
#
# License: BSD-3-Clause
from copy import deepcopy
import numpy as np
from ...utils import logger
def _refine_sensor_orientation(chanin):
"""Improve orientation matrices based on multiaxis measures.
    The ex and ey elements from _convert_channel_info were not oriented
    based on the physical orientation of the sensor.
    It doesn't have to be this way: we can use (if available) the orientation
    information from multi-axis recordings to refine these elements.
"""
logger.info("Refining sensor orientations...")
chanout = deepcopy(chanin)
tmpname = list()
for ii in range(len(chanin)):
tmpname.append(chanin[ii]["ch_name"])
for ii in range(len(chanin)):
tmploc = deepcopy(chanin[ii]["loc"])
tmploc = tmploc.reshape(3, 4, order="F")
        if not np.isnan(tmploc.sum()):
target, flipFlag = METHOD_NAME(tmpname, ii)
            if not np.isnan(target):
targetloc = deepcopy(chanin[target]["loc"])
                if not np.isnan(targetloc.sum()):
targetloc = targetloc.reshape(3, 4, order="F")
tmploc[:, 2] = targetloc[:, 3]
tmploc[:, 1] = flipFlag * np.cross(tmploc[:, 2], tmploc[:, 3])
chanout[ii]["loc"] = tmploc.reshape(12, order="F")
logger.info("[done]")
return chanout
def METHOD_NAME(tmpname, seedID):
"""Try to guess the name of another axis of a multiaxis sensor."""
    # see if it's using the old RAD/TAN convention first, otherwise use XYZ
if tmpname[seedID][-3:] == "RAD":
prefix1 = "RAD"
prefix2 = "TAN"
flipflag = 1.0
elif tmpname[seedID][-3:] == "TAN":
prefix1 = "TAN"
prefix2 = "RAD"
flipflag = -1.0
elif tmpname[seedID][-1:] == "Z" or tmpname[seedID][-3:] == "[Z]":
prefix1 = "Z"
prefix2 = "Y"
flipflag = -1.0
elif tmpname[seedID][-1:] == "Y" or tmpname[seedID][-3:] == "[Y]":
prefix1 = "Y"
prefix2 = "Z"
flipflag = 1.0
elif tmpname[seedID][-1:] == "X" or tmpname[seedID][-3:] == "[X]":
prefix1 = "X"
prefix2 = "Y"
flipflag = 1.0
else:
prefix1 = "?"
prefix2 = "?"
flipflag = 1.0
target_name = tmpname[seedID][: -len(prefix1)] + prefix2
target_id = np.where([t == target_name for t in tmpname])[0]
target_id = target_id[0] if len(target_id) else np.nan
return target_id, flipflag
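# Illustrative helper (not part of the original module; the channel names are
# made up) showing the RAD/TAN pairing handled above: the partner of a
# "... RAD" channel is the same name ending in "TAN", with a flip flag of +1.
def _example_guess_other_chan_axis():
    names = ["A1 RAD", "A1 TAN", "A2 RAD"]
    target_id, flipflag = METHOD_NAME(names, 0)  # seed is "A1 RAD"
    assert names[target_id] == "A1 TAN" and flipflag == 1.0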
def _get_pos_units(pos):
"""Get the units of a point cloud.
    Determines the units of a point cloud of sensor positions and provides the
    scale factor required to convert the positions to meters.
"""
# get rid of None elements
nppos = np.empty((0, 3))
for ii in range(0, len(pos)):
if pos[ii] is not None and sum(np.isnan(pos[ii])) == 0:
nppos = np.vstack((nppos, pos[ii]))
idrange = np.empty(shape=(0, 3))
for ii in range(0, 3):
q90, q10 = np.percentile(nppos[:, ii], [90, 10])
idrange = np.append(idrange, q90 - q10)
size = np.linalg.norm(idrange)
unit, sf = _size2units(size)
return unit, sf
def _size2units(size):
"""Convert the size returned from _get_pos_units into a physical unit."""
if size >= 0.050 and size < 0.500:
unit = "m"
sf = 1
elif size >= 0.50 and size < 5:
unit = "dm"
sf = 10
elif size >= 5 and size < 50:
unit = "cm"
sf = 100
elif size >= 50 and size < 500:
unit = "mm"
sf = 1000
else:
unit = "unknown"
sf = 1
return unit, sf
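# Quick sketch of the unit heuristics above (the sizes are hypothetical spreads
# of a sensor point cloud): a spread of ~0.2 is treated as meters and ~200 as
# millimetres, each with the scale factor needed to convert to meters.
def _example_size2units():
    assert _size2units(0.2) == ("m", 1)
    assert _size2units(200) == ("mm", 1000)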
def _get_plane_vectors(ez):
"""Get two orthogonal vectors orthogonal to ez (ez will be modified).
    Note: the ex and ey positions will not be realistic; this can be fixed
    using _refine_sensor_orientation.
"""
assert ez.shape == (3,)
ez_len = np.sqrt(np.sum(ez * ez))
if ez_len == 0:
raise RuntimeError("Zero length normal. Cannot proceed.")
if np.abs(ez_len - np.abs(ez[2])) < 1e-5: # ez already in z-direction
ex = np.array([1.0, 0.0, 0.0])
else:
ex = np.zeros(3)
if ez[1] < ez[2]:
ex[0 if ez[0] < ez[1] else 1] = 1.0
else:
ex[0 if ez[0] < ez[2] else 2] = 1.0
ez /= ez_len
ex -= np.dot(ez, ex) * ez
ex /= np.sqrt(np.sum(ex * ex))
ey = np.cross(ez, ex)
return ex, ey |
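# Usage sketch for _get_plane_vectors (values chosen for illustration): the
# returned ex and ey, together with the normalised ez, form a right-handed
# orthonormal basis. Note that ez is normalised in place.
def _example_get_plane_vectors():
    ez = np.array([0.0, 0.0, 2.0])
    ex, ey = _get_plane_vectors(ez)
    assert np.allclose(np.cross(ex, ey), ez)  # ez is now unit length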
299,835 | test single binary image | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for distance transform ops."""
import pytest
import numpy as np
import tensorflow as tf
from tensorflow_addons.image import distance_transform as dist_ops
from tensorflow_addons.utils import test_utils
@pytest.mark.with_device(["cpu", "gpu"])
@pytest.mark.parametrize("dtype", [tf.float16, tf.float32, tf.float64])
def METHOD_NAME(dtype):
image = [
[[1], [1], [1], [1], [1]],
[[1], [1], [1], [1], [1]],
[[0], [1], [0], [1], [0]],
[[1], [0], [1], [0], [1]],
[[0], [1], [0], [1], [0]],
]
expected_output = np.array(
[
2,
2.23606801,
2,
2.23606801,
2,
1,
1.41421354,
1,
1.41421354,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
1,
0,
]
)
image = tf.constant(image, dtype=tf.uint8)
output = dist_ops.euclidean_dist_transform(image, dtype=dtype)
output_flat = tf.reshape(output, [-1])
assert output.dtype == dtype
assert output.shape == [5, 5, 1]
test_utils.assert_allclose_according_to_type(output_flat, expected_output)
@pytest.mark.with_device(["cpu", "gpu"])
@pytest.mark.parametrize("dtype", [tf.float16, tf.float32, tf.float64])
def test_batch_binary_images(dtype):
batch_size = 3
image = [
[[0], [0], [0], [0], [0]],
[[0], [1], [1], [1], [0]],
[[0], [1], [1], [1], [0]],
[[0], [1], [1], [1], [0]],
[[0], [0], [0], [0], [0]],
]
expected_output = np.array(
[0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 2, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0]
* batch_size
)
images = tf.constant([image] * batch_size, dtype=tf.uint8)
output = dist_ops.euclidean_dist_transform(images, dtype=dtype)
output_flat = tf.reshape(output, [-1])
assert output.shape == [batch_size, 5, 5, 1]
test_utils.assert_allclose_according_to_type(output_flat, expected_output)
@pytest.mark.with_device(["cpu", "gpu"])
@pytest.mark.parametrize("dtype", [tf.uint8, tf.int32, tf.int64])
def test_image_with_invalid_dtype(dtype):
image = [
[[1], [1], [1], [1], [1]],
[[1], [1], [1], [1], [1]],
[[0], [1], [0], [1], [0]],
[[1], [0], [1], [0], [1]],
[[0], [1], [0], [1], [0]],
]
image = tf.constant(image, dtype=tf.uint8)
with pytest.raises(TypeError, match="`dtype` must be float16, float32 or float64"):
_ = dist_ops.euclidean_dist_transform(image, dtype=dtype)
@pytest.mark.with_device(["cpu", "gpu"])
def test_all_zeros():
image = tf.zeros([10, 10], tf.uint8)
expected_output = np.zeros([10, 10])
for output_dtype in [tf.float16, tf.float32, tf.float64]:
output = dist_ops.euclidean_dist_transform(image, dtype=output_dtype)
np.testing.assert_allclose(output, expected_output)
@pytest.mark.with_device(["cpu", "gpu"])
def test_all_ones():
image = tf.ones([10, 10, 1], tf.uint8)
output = dist_ops.euclidean_dist_transform(image)
expected_output = np.full([10, 10, 1], tf.math.sqrt(tf.float32.max))
np.testing.assert_allclose(output, expected_output)
@pytest.mark.with_device(["cpu", "gpu"])
def test_multi_channels():
channels = 3
batch_size = 2048
image = [
[[0], [0], [0], [0], [0]],
[[0], [1], [1], [1], [0]],
[[0], [1], [1], [1], [0]],
[[0], [1], [1], [1], [0]],
[[0], [0], [0], [0], [0]],
]
expected_output = np.tile(
np.expand_dims(
np.array(
[
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
1,
2,
1,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
]
),
axis=-1,
),
[batch_size, 3],
)
image = np.tile(image, [1, 1, channels])
images = tf.constant([image] * batch_size, dtype=tf.uint8)
output = dist_ops.euclidean_dist_transform(images, dtype=tf.float32)
output_flat = tf.reshape(output, [-1, 3])
assert output.shape == [batch_size, 5, 5, channels]
test_utils.assert_allclose_according_to_type(output_flat, expected_output) |
299,836 | run | # Scans Wikipedia for pages using the CongBio and CongLinks
# templates, which have Bioguide IDs. Updates the 'wikipedia'
# ID field for matching Members of Congress, and for pages
# using the CongLinks template also updates a variety of
# other ID as found in the template.
import lxml.etree, re, urllib.request, urllib.parse, urllib.error
import utils, os.path
def METHOD_NAME():
# Field mapping. And which fields should be turned into integers.
# See https://en.wikipedia.org/wiki/Template:CongLinks for what's possibly available.
fieldmap = {
"congbio": "bioguide",
#"fec": "fec", # handled specially...
"govtrack": "govtrack", # for sanity checking since we definitely have this already (I caught some Wikipedia errors)
"opensecrets": "opensecrets",
"votesmart": "votesmart",
"cspan": "cspan",
}
int_fields = ("govtrack", "votesmart", "cspan")
# default to not caching
cache = utils.flags().get('cache', False)
# Load legislator files and map bioguide IDs.
y1 = utils.load_data("legislators-current.yaml")
y2 = utils.load_data("legislators-historical.yaml")
bioguides = { }
for y in y1+y2:
bioguides[y["id"]["bioguide"]] = y
# Okay now the Wikipedia stuff...
def get_matching_pages():
# Does a Wikipedia API search for pages containing either of the
# two templates. Returns the pages.
page_titles = set()
for template in ("CongLinks", "CongBio"):
eicontinue = ""
while True:
# construct query URL, using the "eicontinue" of the last query to get the next batch
url = 'http://en.wikipedia.org/w/api.php?action=query&list=embeddedin&eititle=Template:%s&eilimit=500&format=xml' % template
if eicontinue: url += "&eicontinue=" + eicontinue
# load the XML
print("Getting %s pages (%d...)" % (template, len(page_titles)))
dom = lxml.etree.fromstring(utils.download(url, None, True)) # can't cache eicontinue probably
for pgname in dom.xpath("query/embeddedin/ei/@title"):
page_titles.add(pgname)
# get the next eicontinue value and loop
eicontinue = dom.xpath("string(query-continue/embeddedin/@eicontinue)")
if not eicontinue: break
return page_titles
# Get the list of Wikipedia pages that use any of the templates we care about.
page_list_cache_file = os.path.join(utils.cache_dir(), "legislators/wikipedia/page_titles")
if cache and os.path.exists(page_list_cache_file):
# Load from cache.
matching_pages = open(page_list_cache_file).read().split("\n")
else:
# Query Wikipedia API and save to cache.
matching_pages = get_matching_pages()
utils.write(("\n".join(matching_pages)), page_list_cache_file)
# Filter out things that aren't actually pages (User:, Talk:, etcetera, anything with a colon).
matching_pages = [p for p in matching_pages if ":" not in p]
# Load each page's content and parse the template.
for p in sorted(matching_pages):
if " campaign" in p: continue
if " (surname)" in p: continue
if "career of " in p: continue
if "for Congress" in p: continue
if p.startswith("List of "): continue
if p in ("New York in the American Civil War", "Upper Marlboro, Maryland"): continue
# Query the Wikipedia API to get the raw page content in XML,
# and then use XPath to get the raw page text.
url = "http://en.wikipedia.org/w/api.php?action=query&titles=" + urllib.parse.quote(p.encode("utf8")) + "&export&exportnowrap"
cache_path = "legislators/wikipedia/pages/" + p
dom = lxml.etree.fromstring(utils.download(url, cache_path, not cache))
page_content = dom.xpath("string(mw:page/mw:revision/mw:text)", namespaces={ "mw": "http://www.mediawiki.org/xml/export-0.8/" })
# Build a dict for the IDs that we want to insert into our files.
new_ids = {
"wikipedia": p # Wikipedia page name, with spaces for spaces (not underscores)
}
if "CongLinks" in page_content:
# Parse the key/val pairs in the template.
m = re.search(r"\{\{\s*CongLinks\s+([^}]*\S)\s*\}\}", page_content)
if not m: continue # no template?
for arg in m.group(1).split("|"):
if "=" not in arg: continue
key, val = arg.split("=", 1)
key = key.strip()
val = val.strip()
if val and key in fieldmap:
try:
if fieldmap[key] in int_fields: val = int(val)
except ValueError:
print("invalid value", key, val)
continue
if key == "opensecrets": val = val.replace("&newMem=Y", "").replace("&newmem=Y", "").replace("&cycle=2004", "").upper()
new_ids[fieldmap[key]] = val
if "bioguide" not in new_ids: continue
new_ids["bioguide"] = new_ids["bioguide"].upper() # hmm
bioguide = new_ids["bioguide"]
else:
m = re.search(r"\{\{\s*CongBio\s*\|\s*(\w+)\s*\}\}", page_content)
if not m: continue # no template?
bioguide = m.group(1).upper()
if not bioguide in bioguides:
print("Member not found: " + bioguide, p, "(Might have been a delegate to the Constitutional Convention.)")
continue
# handle FEC ids specially because they are stored in an array...
fec_id = new_ids.get("fec")
if fec_id: del new_ids["fec"]
member = bioguides[bioguide]
member["id"].update(new_ids)
# ...finish the FEC id.
if fec_id:
if fec_id not in bioguides[bioguide]["id"].get("fec", []):
bioguides[bioguide]["id"].setdefault("fec", []).append(fec_id)
#print p.encode("utf8"), new_ids
utils.save_data(y1, "legislators-current.yaml")
utils.save_data(y2, "legislators-historical.yaml")
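# Standalone illustration (made-up wikitext and IDs) of the CongLinks template
# parsing performed above:
def _example_parse_conglinks():
    text = "{{CongLinks | congbio=B000123 | govtrack=412999 | cspan=77777}}"
    m = re.search(r"\{\{\s*CongLinks\s+([^}]*\S)\s*\}\}", text)
    pairs = {}
    for arg in m.group(1).split("|"):
        if "=" not in arg:
            continue
        key, val = arg.split("=", 1)
        pairs[key.strip()] = val.strip()
    assert pairs["congbio"] == "B000123" and pairs["govtrack"] == "412999"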
if __name__ == '__main__':
METHOD_NAME() |
299,837 | func | import pytest
from virtool_core.models.roles import AdministratorRole
from virtool.authorization.relationships import AdministratorRoleAssignment
from virtool.authorization.client import get_authorization_client_from_app
from virtool.data.utils import get_data_from_app
from virtool.groups.oas import UpdateGroupRequest, UpdatePermissionsRequest
from virtool.mongo.utils import get_one_field
from virtool.users.db import validate_credentials
@pytest.mark.apitest
async def test_get_roles(spawn_client, snapshot):
client = await spawn_client(authorize=True, administrator=True)
resp = await client.get("/admin/roles")
assert resp.status == 200
assert await resp.json() == snapshot
@pytest.mark.apitest
async def test_list_users(spawn_client, fake2, snapshot, authorization_client):
client = await spawn_client(authorize=True, administrator=True)
user_1 = await fake2.users.create()
user_2 = await fake2.users.create()
authorization_client = client.app["authorization"]
await authorization_client.add(
AdministratorRoleAssignment(user_1.id, AdministratorRole.BASE),
AdministratorRoleAssignment(user_2.id, AdministratorRole.FULL),
)
resp = await client.get("/admin/users")
assert resp.status == 200
assert await resp.json() == snapshot
@pytest.mark.apitest
async def test_get_user(spawn_client, fake2, snapshot, static_time):
client = await spawn_client(authorize=True, administrator=True)
user = await fake2.users.create()
authorization_client = client.app["authorization"]
await authorization_client.add(
AdministratorRoleAssignment(user.id, AdministratorRole.BASE),
)
resp = await client.get(f"/admin/users/{user.id}")
assert resp.status == 200
assert await resp.json() == snapshot
@pytest.mark.apitest
@pytest.mark.parametrize(
"role", [None, AdministratorRole.USERS, AdministratorRole.FULL]
)
async def test_update_admin_role(spawn_client, fake2, snapshot, role, mongo):
client = await spawn_client(authorize=True, administrator=True)
user = await fake2.users.create()
resp = await client.put(f"/admin/users/{user.id}/role", {"role": role})
assert resp.status == 200
if role == AdministratorRole.FULL:
assert await get_one_field(mongo.users, "administrator", user.id) is True
assert await resp.json() == snapshot
@pytest.fixture
def setup_admin_update_user(fake2, spawn_client):
async def METHOD_NAME(administrator):
client = await spawn_client(authorize=True, administrator=administrator)
authorization_client = client.app["authorization"]
if not administrator:
await authorization_client.remove(
*[
AdministratorRoleAssignment("test", role)
for role in AdministratorRole
]
)
group_1 = await fake2.groups.create()
group_2 = await fake2.groups.create()
groups = get_data_from_app(client.app).groups
await groups.update(
group_1.id,
UpdateGroupRequest(permissions=UpdatePermissionsRequest(upload_file=True)),
)
await groups.update(
group_2.id,
UpdateGroupRequest(
permissions=UpdatePermissionsRequest(
create_sample=True, create_ref=True
)
),
)
user = await fake2.users.create(groups=[group_1])
await authorization_client.remove(
*[AdministratorRoleAssignment(user.id, role) for role in AdministratorRole]
)
return client, group_1, group_2, user
return METHOD_NAME
@pytest.mark.apitest
class TestUpdateUser:
async def test(self, setup_admin_update_user, snapshot, mongo):
client, group_1, _, user = await setup_admin_update_user(True)
resp = await client.patch(
f"/admin/users/{user.id}",
data={
"force_reset": True,
"password": "hello_world",
"primary_group": group_1.id,
},
)
assert resp.status == 200
assert await validate_credentials(mongo, user.id, "hello_world")
assert await resp.json() == snapshot
@pytest.mark.parametrize(
"administrator, target_administrator, status",
[
(None, None, 403),
(AdministratorRole.BASE, None, 403),
(AdministratorRole.USERS, None, 200),
(AdministratorRole.USERS, AdministratorRole.BASE, 403),
(AdministratorRole.FULL, AdministratorRole.BASE, 200),
],
)
async def test_set_admin_roles(
self,
setup_admin_update_user,
snapshot,
administrator,
target_administrator,
status,
):
client, _, _, user = await setup_admin_update_user(False)
authorization_client = get_authorization_client_from_app(client.app)
if administrator is not None:
await authorization_client.add(
AdministratorRoleAssignment("test", administrator)
)
if target_administrator is not None:
await authorization_client.add(
AdministratorRoleAssignment(user.id, target_administrator)
)
resp = await client.patch(
f"/admin/users/{user.id}",
data={
"force_reset": True,
},
)
assert resp.status == status
if status == 200:
body = await resp.json()
assert body["force_reset"] is True
assert body == snapshot
@pytest.mark.apitest
@pytest.mark.parametrize("name,status", [("relist_jobs", 202), ("foo", 400)])
async def test_run_actions(spawn_client, fake2, snapshot, mongo, name, status):
client = await spawn_client(authorize=True, administrator=True)
resp = await client.put("/admin/actions", {"name": name})
assert resp.status == status |
299,838 | get approve url | from django.urls import reverse
from rest_framework import status
from lego.apps.quotes.models import Quote
from lego.apps.users.models import AbakusGroup, User
from lego.utils.test_utils import BaseAPITestCase
def _get_list_url():
return reverse("api:v1:quote-list")
def _get_list_approved_url():
return _get_list_url() + "?approved=True"
def _get_list_unapproved_url():
return _get_list_url() + "?approved=False"
def _get_detail_url(pk):
return reverse("api:v1:quote-detail", kwargs={"pk": pk})
def METHOD_NAME(pk):
return reverse("api:v1:quote-approve", kwargs={"pk": pk})
def _get_unapprove_url(pk):
return reverse("api:v1:quote-unapprove", kwargs={"pk": pk})
class QuoteViewSetTestCase(BaseAPITestCase):
fixtures = ["test_users.yaml", "test_abakus_groups.yaml", "test_quotes.yaml"]
def setUp(self):
self.authenticated_user = User.objects.get(username="test1")
self.group = AbakusGroup.objects_with_text.get(name="QuoteAdminTest")
self.group.add_user(self.authenticated_user)
self.unauthenticated_user = User.objects.get(username="test2")
self.quote_data = {"text": "TestText", "source": "TestSource"}
def test_create_authenticated(self):
"""Users with permissions should be able to create quotes"""
self.client.force_authenticate(self.authenticated_user)
response = self.client.post(_get_list_url(), self.quote_data)
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
def test_create_unauthenticated(self):
"""Users with no permissions should not be able to create quotes"""
self.client.force_authenticate(self.unauthenticated_user)
response = self.client.post(_get_list_url(), self.quote_data)
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_list_authenticated(self):
"""Users with permissions should be able to list quotes"""
self.client.force_authenticate(self.authenticated_user)
response = self.client.get(_get_list_approved_url())
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue(response.json())
def test_list_unauthenticated(self):
"""Users with no permissions should not be able to list quotes"""
self.client.force_authenticate(user=self.unauthenticated_user)
response = self.client.get(_get_list_approved_url())
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_detail_authenticated(self):
"""Users with permissions should be able to see detailed quotes"""
self.client.force_authenticate(self.authenticated_user)
response = self.client.get(_get_detail_url(1))
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertTrue(response.json())
def test_detail_unauthenticated(self):
"""Users with no permissions should not be able see detailed quotes"""
self.client.force_authenticate(user=self.unauthenticated_user)
response = self.client.get(_get_detail_url(1))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_approve_authenticated(self):
"""Users with permissions should be able to approve quotes"""
self.client.force_authenticate(self.authenticated_user)
response = self.client.put(METHOD_NAME(3))
self.assertEqual(response.status_code, status.HTTP_200_OK)
quote = Quote.objects.get(id=3)
self.assertTrue(quote.approved)
def test_approve_permission(self):
"""Users should not have permission to approve their own quotes"""
self.client.force_authenticate(self.authenticated_user)
self.client.post(_get_list_url(), self.quote_data)
response = self.client.put(METHOD_NAME(4))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
quote = Quote.objects.get(id=4)
self.assertFalse(quote.approved)
def test_approve_unauthenticated(self):
"""Users with no permissions should not be able to approve quotes"""
self.client.force_authenticate(self.unauthenticated_user)
response = self.client.put(METHOD_NAME(3))
self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
def test_list_unapproved_authenticated(self):
"""Users with permissions should be able to see unapproved quotes"""
self.client.force_authenticate(self.authenticated_user)
response = self.client.get(_get_list_unapproved_url())
self.assertEqual(response.status_code, status.HTTP_200_OK)
first_quote = response.json()["results"][0]
self.assertFalse(first_quote["approved"])
def test_list_unapproved_unauthenticated(self):
"""Users with no permissions should not be able to see unapproved quotes"""
self.client.force_authenticate(self.unauthenticated_user)
response = self.client.get(_get_list_unapproved_url())
self.assertEqual(status.HTTP_403_FORBIDDEN, response.status_code)
def test_list_approved_unauthorized(self):
"""Users with regular permissions should be able to see approved quotes"""
self.group.permissions.remove("/sudo/admin/quotes/edit/")
self.group.save()
self.client.force_authenticate(self.authenticated_user)
response = self.client.get(_get_list_approved_url())
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.assertTrue(len(response.json()["results"]) > 0)
def test_list_unapproved_unauthorized(self):
"""Users with regular permissions should not be able to see unapproved quotes"""
self.group.permissions.remove("/sudo/admin/quotes/edit/")
self.group.save()
self.client.force_authenticate(self.authenticated_user)
response = self.client.get(_get_list_unapproved_url())
self.assertEqual(status.HTTP_200_OK, response.status_code)
self.assertEqual(len(response.json()["results"]), 0) |
299,839 | test mix up call results with labels | # Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.layers.preprocessing.mix_up import MixUp
from keras_cv.tests.test_case import TestCase
num_classes = 10
class MixUpTest(TestCase):
def test_return_shapes(self):
xs = tf.ones((2, 512, 512, 3))
# randomly sample labels
ys_labels = tf.random.categorical(tf.math.log([[0.5, 0.5]]), 2)
ys_labels = tf.squeeze(ys_labels)
ys_labels = tf.one_hot(ys_labels, num_classes)
# randomly sample bounding boxes
ys_bounding_boxes = {
"boxes": tf.random.uniform((2, 3, 4), 0, 1),
"classes": tf.random.uniform((2, 3), 0, 1),
}
# randomly sample segmentation mask
ys_segmentation_masks = tf.cast(
tf.stack(
[2 * tf.ones((512, 512)), tf.ones((512, 512))],
axis=0,
),
tf.uint8,
)
ys_segmentation_masks = tf.one_hot(ys_segmentation_masks, 3)
layer = MixUp()
# mixup on labels
outputs = layer(
{
"images": xs,
"labels": ys_labels,
"bounding_boxes": ys_bounding_boxes,
"segmentation_masks": ys_segmentation_masks,
}
)
xs, ys_labels, ys_bounding_boxes, ys_segmentation_masks = (
outputs["images"],
outputs["labels"],
outputs["bounding_boxes"],
outputs["segmentation_masks"],
)
self.assertEqual(xs.shape, [2, 512, 512, 3])
self.assertEqual(ys_labels.shape, [2, 10])
self.assertEqual(ys_bounding_boxes["boxes"].shape, [2, 6, 4])
self.assertEqual(ys_bounding_boxes["classes"].shape, [2, 6])
self.assertEqual(ys_segmentation_masks.shape, [2, 512, 512, 3])
def METHOD_NAME(self):
xs = tf.cast(
tf.stack(
[2 * tf.ones((4, 4, 3)), tf.ones((4, 4, 3))],
axis=0,
),
tf.float32,
)
ys = tf.one_hot(tf.constant([0, 1]), 2)
layer = MixUp()
outputs = layer({"images": xs, "labels": ys})
xs, ys = outputs["images"], outputs["labels"]
# None of the individual values should still be close to 1 or 0
self.assertNotAllClose(xs, 1.0)
self.assertNotAllClose(xs, 2.0)
# No labels should still be close to their originals
self.assertNotAllClose(ys, 1.0)
self.assertNotAllClose(ys, 0.0)
def test_mix_up_call_results_with_masks(self):
xs = tf.cast(
tf.stack(
[2 * tf.ones((4, 4, 3)), tf.ones((4, 4, 3))],
axis=0,
),
tf.float32,
)
ys_segmentation_masks = tf.cast(
tf.stack(
[2 * tf.ones((4, 4)), tf.ones((4, 4))],
axis=0,
),
tf.uint8,
)
ys_segmentation_masks = tf.one_hot(ys_segmentation_masks, 3)
layer = MixUp()
outputs = layer(
{"images": xs, "segmentation_masks": ys_segmentation_masks}
)
xs, ys_segmentation_masks = (
outputs["images"],
outputs["segmentation_masks"],
)
# None of the individual values should still be close to 1 or 0
self.assertNotAllClose(xs, 1.0)
self.assertNotAllClose(xs, 2.0)
# No masks should still be close to their originals
self.assertNotAllClose(ys_segmentation_masks, 1.0)
self.assertNotAllClose(ys_segmentation_masks, 0.0)
def test_in_tf_function(self):
xs = tf.cast(
tf.stack(
[2 * tf.ones((4, 4, 3)), tf.ones((4, 4, 3))],
axis=0,
),
tf.float32,
)
ys = tf.one_hot(tf.constant([0, 1]), 2)
layer = MixUp()
@tf.function
def augment(x, y):
return layer({"images": x, "labels": y})
outputs = augment(xs, ys)
xs, ys = outputs["images"], outputs["labels"]
# None of the individual values should still be close to 1 or 0
self.assertNotAllClose(xs, 1.0)
self.assertNotAllClose(xs, 2.0)
# No labels should still be close to their originals
self.assertNotAllClose(ys, 1.0)
self.assertNotAllClose(ys, 0.0)
def test_image_input_only(self):
xs = tf.cast(
tf.stack(
[2 * tf.ones((100, 100, 1)), tf.ones((100, 100, 1))], axis=0
),
tf.float32,
)
layer = MixUp()
with self.assertRaisesRegexp(
ValueError, "expects inputs in a dictionary"
):
_ = layer(xs)
def test_single_image_input(self):
xs = tf.ones((512, 512, 3))
ys = tf.one_hot(tf.constant([1]), 2)
inputs = {"images": xs, "labels": ys}
layer = MixUp()
with self.assertRaisesRegexp(
ValueError, "MixUp received a single image to `call`"
):
_ = layer(inputs)
def test_int_labels(self):
xs = tf.ones((2, 512, 512, 3))
ys = tf.one_hot(tf.constant([1, 0]), 2, dtype=tf.int32)
inputs = {"images": xs, "labels": ys}
layer = MixUp()
with self.assertRaisesRegexp(
ValueError, "MixUp received labels with type"
):
_ = layer(inputs)
def test_image_input(self):
xs = tf.ones((2, 512, 512, 3))
layer = MixUp()
with self.assertRaisesRegexp(
ValueError, "MixUp expects inputs in a dictionary with format"
):
_ = layer(xs) |
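# Minimal usage sketch of the layer under test (kept as a comment; shapes are
# arbitrary, and the dict input format matches the tests above):
# images = tf.random.uniform((2, 64, 64, 3))
# labels = tf.one_hot([0, 1], num_classes)
# mixed = MixUp()({"images": images, "labels": labels})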
299,840 | resource guid | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetNetworkGroupResult',
'AwaitableGetNetworkGroupResult',
'get_network_group',
'get_network_group_output',
]
@pulumi.output_type
class GetNetworkGroupResult:
"""
The network group resource
"""
def __init__(__self__, description=None, etag=None, id=None, name=None, provisioning_state=None, METHOD_NAME=None, system_data=None, type=None):
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if METHOD_NAME and not isinstance(METHOD_NAME, str):
raise TypeError("Expected argument 'resource_guid' to be a str")
pulumi.set(__self__, "resource_guid", METHOD_NAME)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
A description of the network group.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the scope assignment resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuid")
def METHOD_NAME(self) -> str:
"""
Unique identifier for this resource.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
The system metadata related to this resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetNetworkGroupResult(GetNetworkGroupResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetNetworkGroupResult(
description=self.description,
etag=self.etag,
id=self.id,
name=self.name,
provisioning_state=self.provisioning_state,
METHOD_NAME=self.METHOD_NAME,
system_data=self.system_data,
type=self.type)
def get_network_group(network_group_name: Optional[str] = None,
network_manager_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetNetworkGroupResult:
"""
Gets the specified network group.
:param str network_group_name: The name of the network group.
:param str network_manager_name: The name of the network manager.
:param str resource_group_name: The name of the resource group.
"""
__args__ = dict()
__args__['networkGroupName'] = network_group_name
__args__['networkManagerName'] = network_manager_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:network/v20230501:getNetworkGroup', __args__, opts=opts, typ=GetNetworkGroupResult).value
return AwaitableGetNetworkGroupResult(
description=pulumi.get(__ret__, 'description'),
etag=pulumi.get(__ret__, 'etag'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
METHOD_NAME=pulumi.get(__ret__, 'resource_guid'),
system_data=pulumi.get(__ret__, 'system_data'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_network_group)
def get_network_group_output(network_group_name: Optional[pulumi.Input[str]] = None,
network_manager_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetNetworkGroupResult]:
"""
Gets the specified network group.
:param str network_group_name: The name of the network group.
:param str network_manager_name: The name of the network manager.
:param str resource_group_name: The name of the resource group.
"""
... |
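# Usage sketch, kept as a comment because the invoke only works inside a running
# Pulumi program (the resource names below are placeholders, not real resources):
# result = get_network_group(
#     network_group_name="my-network-group",
#     network_manager_name="my-network-manager",
#     resource_group_name="my-resource-group",
# )
# pulumi.export("networkGroupId", result.id)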
299,841 | scan for blocks | """
Celery task for CSV student answer export.
"""
import time
from celery import shared_task
from celery.utils.log import get_task_logger
from django.contrib.auth.models import User
from django.db.models import F
from lms.djangoapps.instructor_task.models import ReportStore
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey, UsageKey
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
from problem_builder.answer import AnswerBlock
from .mcq import MCQBlock, RatingBlock
from .mrq import MRQBlock
from .questionnaire import QuestionnaireAbstractBlock
from .sub_api import sub_api
logger = get_task_logger(__name__)
@shared_task()
def export_data(course_id, source_block_id_str, block_types, user_ids, match_string):
"""
Exports student answers to all supported questions to a CSV file.
"""
start_timestamp = time.time()
logger.debug("Beginning data export")
try:
course_key = CourseKey.from_string(course_id)
usage_key = UsageKey.from_string(source_block_id_str)
except InvalidKeyError as err:
raise ValueError("Could not find the specified Block ID.") from err
src_block = modulestore().get_item(usage_key)
course_key_str = str(course_key)
type_map = {cls.__name__: cls for cls in [MCQBlock, MRQBlock, RatingBlock, AnswerBlock]}
if not block_types:
block_types = tuple(type_map.values())
else:
block_types = tuple(type_map[class_name] for class_name in block_types)
# Build an ordered list of blocks to include in the export
blocks_to_include = []
def METHOD_NAME(block):
""" Recursively scan the course tree for blocks of interest """
if isinstance(block, block_types):
blocks_to_include.append(block)
elif block.has_children:
for child_id in block.children:
try:
METHOD_NAME(block.runtime.get_block(child_id))
except ItemNotFoundError:
# Blocks may refer to missing children. Don't break in this case.
pass
METHOD_NAME(src_block)
# Define the header row of our CSV:
rows = []
rows.append(
["Section", "Subsection", "Unit", "Type", "Question", "Answer", "Username", "User ID", "User E-mail"]
)
# Collect results for each block in blocks_to_include
for block in blocks_to_include:
if not user_ids:
results = _extract_data(course_key_str, block, None, match_string)
rows += results
else:
for user_id in user_ids:
results = _extract_data(course_key_str, block, user_id, match_string)
rows += results
# Generate the CSV:
timestamp = time.strftime("%Y-%m-%d-%H%M%S", time.gmtime(start_timestamp))
filename = f"pb-data-export-{timestamp}.csv"
report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')
report_store.store_rows(course_key, filename, rows)
generation_time_s = time.time() - start_timestamp
logger.debug(f"Done data export - took {generation_time_s} seconds")
return {
"error": None,
"report_filename": filename,
"start_timestamp": start_timestamp,
"generation_time_s": generation_time_s,
"display_data": [] if len(rows) == 1 else rows[1:1001] # Limit to preview of 1000 items
}
def _extract_data(course_key_str, block, user_id, match_string):
"""
Extract results for `block`.
"""
rows = []
# Extract info for "Section", "Subsection", and "Unit" columns
section_name, subsection_name, unit_name = _get_context(block)
# Extract info for "Type" column
block_type = _get_type(block)
# Extract info for "Question" column
block_question = _get_question(block)
# Extract info for "Answer" and "Username" columns
# - Get all of the most recent student submissions for this block:
submissions = tuple(_get_submissions(course_key_str, block, user_id))
# If the student ID key doesn't exist, we're dealing with a single student and know the ID already.
student_ids = [submission.get('student_id', user_id) for submission in submissions]
users = get_users_by_anonymous_ids(student_ids)
# - For each submission, look up student's username, email and answer:
answer_cache = {}
for submission in submissions:
student_id = submission.get('student_id', user_id)
username, _user_id, user_email = users.get(
student_id,
(student_id, 'N/A', 'N/A')
)
answer = _get_answer(block, submission, answer_cache)
# Short-circuit if answer does not match search criteria
if not match_string.lower() in answer.lower():
continue
rows.append([
section_name,
subsection_name,
unit_name,
block_type,
block_question,
answer,
username,
_user_id,
user_email
])
return rows
def _get_context(block):
"""
Return section, subsection, and unit names for `block`.
"""
block_names_by_type = {}
block_iter = block
while block_iter:
block_iter_type = block_iter.scope_ids.block_type
block_names_by_type[block_iter_type] = block_iter.display_name_with_default
block_iter = block_iter.get_parent() if block_iter.parent else None
section_name = block_names_by_type.get('chapter', '')
subsection_name = block_names_by_type.get('sequential', '')
unit_name = block_names_by_type.get('vertical', '')
return section_name, subsection_name, unit_name
def _get_type(block):
"""
Return type of `block`.
"""
return block.scope_ids.block_type
def _get_question(block):
"""
Return question for `block`; default to question ID if `question` is not set.
"""
return block.question or block.name
def _get_submissions(course_key_str, block, user_id):
"""
Return submissions for `block`.
"""
# Load the actual student submissions for `block`.
# Note this requires one giant query that retrieves all student submissions for `block` at once.
block_id = str(block.scope_ids.usage_id.replace(branch=None, version_guid=None))
block_type = _get_type(block)
if block_type == 'pb-answer':
block_id = block.name # item_id of Long Answer submission matches question ID and not block ID
if not user_id:
return sub_api.get_all_submissions(course_key_str, block_id, block_type)
else:
student_dict = {
'student_id': user_id,
'item_id': block_id,
'course_id': course_key_str,
'item_type': block_type,
}
return sub_api.get_submissions(student_dict, limit=1)
def get_users_by_anonymous_ids(anonymous_ids):
"""
Return users by anonymous_ids using AnonymousUserId lookup table.
"""
if not anonymous_ids:
return None
users = User.objects.filter(
anonymoususerid__anonymous_user_id__in=anonymous_ids
).annotate(
anonymous_user_id=F('anonymoususerid__anonymous_user_id')
).values(
'anonymous_user_id', 'username', 'id', 'email'
).iterator()
return {
user['anonymous_user_id']: (user['username'], user['id'], user['email']) for user in users
}
def _get_answer(block, submission, answer_cache):
"""
Return answer associated with `submission` to `block`.
`answer_cache` is a dict that is reset for each block.
"""
answer = submission['answer']
if isinstance(block, QuestionnaireAbstractBlock):
# Convert from answer ID to answer label
if answer not in answer_cache:
answer_cache[answer] = block.get_submission_display(answer)
return answer_cache[answer]
return answer |
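# Hypothetical invocation sketch (the course and block IDs are made up); the
# task is queued through Celery in the usual way:
# export_data.delay(
#     "course-v1:OrgX+Demo+2024",
#     "block-v1:OrgX+Demo+2024+type@problem-builder+block@0123456789ab",
#     ["MCQBlock", "AnswerBlock"],
#     [],
#     "",
# )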
299,842 | read dataset message trees | import gzip
import json
from pathlib import Path
from typing import Callable, Iterable, Optional, TextIO
import pydantic
from datasets import load_dataset
from .schemas import ExportMessageNode, ExportMessageTree
def open_jsonl_read(input_file_path: str | Path) -> TextIO:
if not isinstance(input_file_path, Path):
input_file_path = Path(input_file_path)
if input_file_path.suffix == ".gz":
return gzip.open(str(input_file_path), mode="tr", encoding="UTF-8")
else:
return input_file_path.open("r", encoding="UTF-8")
def read_oasst_obj(obj_dict: dict) -> ExportMessageTree | ExportMessageNode:
# validate data
if "message_id" in obj_dict:
return pydantic.parse_obj_as(ExportMessageNode, obj_dict)
elif "message_tree_id" in obj_dict:
return pydantic.parse_obj_as(ExportMessageTree, obj_dict)
raise RuntimeError("Unknown object in jsonl file")
def read_oasst_jsonl(
input_file_path: str | Path,
) -> Iterable[ExportMessageTree | ExportMessageNode]:
with open_jsonl_read(input_file_path) as file_in:
# read one object per line
for line in file_in:
dict_tree = json.loads(line)
yield read_oasst_obj(dict_tree)
def read_message_trees(input_file_path: str | Path) -> Iterable[ExportMessageTree]:
for x in read_oasst_jsonl(input_file_path):
assert isinstance(x, ExportMessageTree)
yield x
def read_message_tree_list(
input_file_path: str | Path,
filter: Optional[Callable[[ExportMessageTree], bool]] = None,
) -> list[ExportMessageTree]:
return [t for t in read_message_trees(input_file_path) if not filter or filter(t)]
def convert_hf_message(row: dict) -> None:
emojis = row.get("emojis")
if emojis:
row["emojis"] = dict(zip(emojis["name"], emojis["count"]))
labels = row.get("labels")
if labels:
row["labels"] = {
name: {"value": value, "count": count}
for name, value, count in zip(labels["name"], labels["value"], labels["count"])
}
def read_messages(input_file_path: str | Path) -> Iterable[ExportMessageNode]:
for x in read_oasst_jsonl(input_file_path):
assert isinstance(x, ExportMessageNode)
yield x
def read_message_list(
input_file_path: str | Path,
filter: Optional[Callable[[ExportMessageNode], bool]] = None,
) -> list[ExportMessageNode]:
return [t for t in read_messages(input_file_path) if not filter or filter(t)]
def METHOD_NAME(
hf_dataset_name: str = "OpenAssistant/oasst1",
split: str = "train+validation",
) -> Iterable[ExportMessageTree]:
dataset = load_dataset(hf_dataset_name, split=split)
tree_dict: dict = None
parents: list = None
for row in dataset:
convert_hf_message(row)
if row["parent_id"] is None:
if tree_dict:
tree = read_oasst_obj(tree_dict)
assert isinstance(tree, ExportMessageTree)
yield tree
tree_dict = {
"message_tree_id": row["message_id"],
"tree_state": row["tree_state"],
"prompt": row,
}
parents = []
else:
while parents[-1]["message_id"] != row["parent_id"]:
parents.pop()
parent = parents[-1]
if "replies" not in parent:
parent["replies"] = []
parent["replies"].append(row)
row.pop("message_tree_id", None)
row.pop("tree_state", None)
parents.append(row)
if tree_dict:
tree = read_oasst_obj(tree_dict)
assert isinstance(tree, ExportMessageTree)
yield tree
def read_dataset_messages(
hf_dataset_name: str = "OpenAssistant/oasst1",
split: str = "train+validation",
) -> Iterable[ExportMessageNode]:
dataset = load_dataset(hf_dataset_name, split=split)
for row in dataset:
convert_hf_message(row)
message = read_oasst_obj(row)
assert isinstance(message, ExportMessageNode)
yield message |
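# Usage sketch (requires network access to the Hugging Face hub; attribute names
# follow the ExportMessageTree/ExportMessageNode schemas imported above):
def _example_iterate_trees():
    for tree in METHOD_NAME(split="validation"):
        print(tree.message_tree_id, tree.prompt.text[:40])
        break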
299,843 | analyze for missing optional keyword |
from vsg import parser
from vsg import token
from vsg import violation
from vsg.vhdlFile import utils
from vsg.rule_group import structure
from vsg.rules import utils as rules_utils
oInsertTokens = token.for_generate_statement.end_generate_label
oAnchorTokens = token.for_generate_statement.semicolon
oLeftTokens = token.for_generate_statement.end_keyword
oRightTokens = token.for_generate_statement.semicolon
oValueTokens = token.for_generate_statement.generate_label
lRemoveTokens = []
lRemoveTokens.append(token.for_generate_statement.end_generate_label)
lRemoveTokens.append(token.if_generate_statement.end_generate_label)
lRemoveTokens.append(token.case_generate_statement.end_generate_label)
class rule_011(structure.Rule):
'''
This rule checks the **end generate** label on for, case and if generate statements.
|configuring_optional_items_link|
**Violation**
.. code-block:: vhdl
ram_array : for i in 0 to 127 generate
end generate;
**Fix**
.. code-block:: vhdl
ram_array : for i in 0 to 127 generate
end generate ram_array;
'''
def __init__(self):
structure.Rule.__init__(self, 'generate', '011')
self.solution = 'generate label'
self.insert_token = oInsertTokens
self.anchor_token = oAnchorTokens
self.left_token = oLeftTokens
self.right_token = oRightTokens
self.value_token = oValueTokens
self.groups.append('structure::optional')
self.configuration_documentation_link = None
self.action = 'add'
self.configuration.append('action')
self.configuration_documentation_link = 'configuring_optional_items_link'
def _get_tokens_of_interest(self, oFile):
if remove_keyword(self):
return oFile.get_token_and_n_tokens_before_it(lRemoveTokens, 1)
else:
return oFile.get_tokens_bounded_by(token.architecture_body.begin_keyword, token.architecture_body.end_keyword)
def _analyze(self, lToi):
if remove_keyword(self):
analyze_for_existence_of_optional_keyword(lToi, self)
else:
METHOD_NAME(lToi, self)
def _fix_violation(self, oViolation):
if remove_keyword(self):
rules_utils.remove_optional_item(oViolation)
else:
add_optional_item(oViolation)
def add_optional_item(oViolation):
lTokens = oViolation.get_tokens()
dAction = oViolation.get_action()
lTokens.append(parser.whitespace(' '))
lTokens.append(dAction['label'])
oViolation.set_tokens(lTokens)
def analyze_for_existence_of_optional_keyword(lToi, self):
for oToi in lToi:
oViolation = create_violation(oToi, oToi.get_line_number(), self)
self.add_violation(oViolation)
def METHOD_NAME(lToi, self):
for oToi in lToi:
iLine, lTokens = utils.get_toi_parameters(oToi)
lLabels = []
for iToken, oToken in enumerate(lTokens):
iLine = utils.increment_line_number(iLine, oToken)
if manage_labels(oToken, lLabels):
continue
if isinstance(oToken, token.for_generate_statement.end_generate_keyword):
if not utils.are_next_consecutive_token_types_ignoring_whitespace([token.for_generate_statement.end_generate_label], iToken + 1, lTokens):
oNewToi = oToi.extract_tokens(iToken, iToken)
dAction = {}
dAction['label'] = token.for_generate_statement.end_generate_label(lLabels[-1].get_value())
sSolution = 'Add label ' + lLabels[-1].get_value()
oViolation = violation.New(oNewToi.get_line_number(), oNewToi, sSolution)
oViolation.set_action(dAction)
self.add_violation(oViolation)
continue
if isinstance(oToken, token.if_generate_statement.end_generate_keyword):
if not utils.are_next_consecutive_token_types_ignoring_whitespace([token.if_generate_statement.end_generate_label], iToken + 1, lTokens):
oNewToi = oToi.extract_tokens(iToken, iToken)
dAction = {}
dAction['label'] = token.if_generate_statement.end_generate_label(lLabels[-1].get_value())
sSolution = 'Add label ' + lLabels[-1].get_value()
oViolation = violation.New(oNewToi.get_line_number(), oNewToi, sSolution)
oViolation.set_action(dAction)
self.add_violation(oViolation)
continue
if isinstance(oToken, token.case_generate_statement.end_generate_keyword):
if not utils.are_next_consecutive_token_types_ignoring_whitespace([token.case_generate_statement.end_generate_label], iToken + 1, lTokens):
oNewToi = oToi.extract_tokens(iToken, iToken)
dAction = {}
dAction['label'] = token.case_generate_statement.end_generate_label(lLabels[-1].get_value())
sSolution = 'Add label ' + lLabels[-1].get_value()
oViolation = violation.New(oNewToi.get_line_number(), oNewToi, sSolution)
oViolation.set_action(dAction)
self.add_violation(oViolation)
continue
def manage_labels(oToken, lLabels):
if isinstance(oToken, token.for_generate_statement.generate_label):
lLabels.append(oToken)
return True
if isinstance(oToken, token.if_generate_statement.generate_label):
lLabels.append(oToken)
return True
if isinstance(oToken, token.case_generate_statement.generate_label):
lLabels.append(oToken)
return True
if isinstance(oToken, token.for_generate_statement.semicolon):
lLabels.pop()
return True
if isinstance(oToken, token.if_generate_statement.semicolon):
lLabels.pop()
return True
if isinstance(oToken, token.case_generate_statement.semicolon):
lLabels.pop()
return True
return False
def remove_keyword(self):
if self.action == 'remove':
return True
return False
def create_violation(oToi, iLineNumber, self):
sSolution = self.action.capitalize() + ' ' + self.solution
oViolation = violation.New(iLineNumber, oToi, sSolution)
return oViolation |
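# Hedged configuration sketch: as with other optional-item rules, the `action`
# option declared above can be set to "remove" instead of the default "add",
# e.g. in a VSG YAML configuration file (layout assumed from VSG conventions):
#   rule:
#     generate_011:
#       action: remove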
299,844 | get encoder | #-------------------------------------------------------------------------------
#
# Project: EOxServer <http://eoxserver.org>
# Authors: Fabian Schindler <fabian.schindler@eox.at>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2013 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
try:
from io import StringIO
except ImportError:
from cStringIO import StringIO
try:
from PIL import Image, ImageFont, ImageDraw
HAS_PIL = True
except ImportError:
HAS_PIL = False
from lxml.builder import ElementMaker
from eoxserver.core import Component, implements
from eoxserver.core.decoders import kvp, lower
from eoxserver.core.util.xmltools import XMLEncoder, NameSpace, NameSpaceMap
from eoxserver.services.ows.interfaces import ExceptionHandlerInterface
try:
# Python 2
xrange
except NameError:
# Python 3, xrange is now named range
xrange = range
class WMS13ExceptionHandler(Component):
implements(ExceptionHandlerInterface)
service = "WMS"
versions = ("1.3.0", "1.3")
request = None
def METHOD_NAME(self, request):
decoder = WMS13Decoder(request.GET)
exceptions = decoder.exceptions
if exceptions in ("xml", "application/vnd.ogc.se_xml") or not HAS_PIL:
return WMS13ExceptionXMLEncoder()
elif exceptions in ("inimage", "blank"):
return WMS13ExceptionImageEncoder(
decoder.width, decoder.height, decoder.format, decoder.bgcolor,
exceptions=="blank"
)
print (decoder.exceptions)
def handle_exception(self, request, exception):
encoder = self.METHOD_NAME(request)
locator = getattr(exception, "locator", None)
code = getattr(exception, "code", None) or type(exception).__name__
return (
encoder.serialize(
encoder.encode_exception(
str(exception), code, locator
),
),
encoder.content_type,
400
)
class WMS13Decoder(kvp.Decoder):
width = kvp.Parameter(type=int, num="?")
height = kvp.Parameter(type=int, num="?")
format = kvp.Parameter(num="?")
bgcolor = kvp.Parameter(num="?")
exceptions = kvp.Parameter(num="?", type=lower, default="xml")
ns_ogc = NameSpace("http://www.opengis.net/ogc", "ogc")
nsmap = NameSpaceMap(ns_ogc)
OGC = ElementMaker(namespace=ns_ogc.uri, nsmap=nsmap)
class WMS13ExceptionXMLEncoder(XMLEncoder):
def encode_exception(self, message, code, locator=None):
attributes = {
"code": code
}
if locator:
attributes["locator"] = locator
return OGC("ServiceExceptionReport",
OGC("ServiceException",
str(message),
**attributes
),
version="1.3.0"
)
@property
def content_type(self):
return "application/vnd.ogc.se_xml"
def get_schema_locations(self):
return {
"http://www.opengis.net/ogc": "http://schemas.opengis.net/wms/1.3.0/exceptions_1_3_0.xsd"
}
class WMS13ExceptionImageEncoder(object):
def __init__(self, width=None, height=None, format=None, bgcolor=None, blank=False):
self.width = width if width > 0 else 256
self.height = height if height > 0 else 256
if "/" in format:
format = format[format.find("/") + 1:]
self.format = format or "jpeg"
self.bgcolor = bgcolor or "white"
self.blank = blank
@property
def content_type(self):
return "image/%s" % self.format
def encode_exception(self, message, code, locator=None):
width, height = self.width, self.height
image = Image.new("RGB", (width, height), self.bgcolor)
# if requested draw the exception string in the image
if not self.blank:
font = ImageFont.load_default()
draw = ImageDraw.Draw(image)
yoffset = 0
while len(message):
for i in xrange(len(message)):
part = message if i == 0 else message[:-i]
xsize, ysize = font.getsize(part)
print (i, xsize, ysize, part)
if xsize < width:
break
draw.text((0, yoffset), part, font=font, fill="red")
yoffset += ysize
message = message[-i:]
if i == 0:
break
return image
def serialize(self, image):
f = StringIO()
try:
image.save(f, self.format)
except (IOError, KeyError):
image.save(f, "jpeg") # Fallback solution
return f.getvalue() |
299,845 | test dirwatcher can overwrite policy for file | """dir_watcher tests."""
import os
import tempfile
import time
from pathlib import Path
from typing import TYPE_CHECKING, Callable
from unittest.mock import Mock, patch
import pytest
import wandb.filesync.dir_watcher
from wandb.filesync.dir_watcher import DirWatcher, PolicyEnd, PolicyLive, PolicyNow
from wandb.sdk.internal.file_pusher import FilePusher
if TYPE_CHECKING:
from wandb.sdk.interface.interface import PolicyName
@pytest.fixture
def file_pusher():
return Mock()
@pytest.fixture
def settings():
return Mock(ignore_globs=[])
@pytest.fixture
def tempdir():
with tempfile.TemporaryDirectory() as d:
yield Path(d)
@pytest.fixture
def dir_watcher(settings, file_pusher, tempdir: Path) -> DirWatcher:
with patch.object(wandb.filesync.dir_watcher, "wd_polling", Mock()):
yield DirWatcher(
settings=settings,
file_pusher=file_pusher,
file_dir=str(tempdir),
)
def write_with_mtime(path: Path, content: bytes, mtime: int) -> None:
path.write_bytes(content)
os.utime(str(path), (mtime, mtime))
@pytest.mark.parametrize(
["write_file", "expect_called"],
[
(lambda f: write_with_mtime(f, b"content", mtime=0), True),
(lambda f: write_with_mtime(f, b"", mtime=0), False),
(lambda f: None, False),
],
)
def test_dirwatcher_update_policy_live_calls_file_changed_iff_file_nonempty(
tempdir: Path,
file_pusher: FilePusher,
dir_watcher: DirWatcher,
write_file: Callable[[Path], None],
expect_called: bool,
):
"""Test that if a file exists, the update policy is called."""
f = tempdir / "my-file.txt"
write_file(f)
dir_watcher.update_policy(str(f), "live")
assert file_pusher.file_changed.called == expect_called
@pytest.mark.parametrize(
["policy", "expect_called"],
[
("now", True),
("live", True),
("end", False),
],
)
def test_dirwatcher_update_policy_on_nonexistent_file_calls_file_changed_when_file_created_iff_policy_now_or_live(
tempdir: Path,
file_pusher: FilePusher,
dir_watcher: DirWatcher,
policy: "PolicyName",
expect_called: bool,
):
f = tempdir / "my-file.txt"
dir_watcher.update_policy(str(f), policy)
write_with_mtime(f, b"content", mtime=0)
file_pusher.file_changed.assert_not_called()
dir_watcher._on_file_created(Mock(src_path=str(f)))
assert file_pusher.file_changed.called == expect_called
def test_dirwatcher_finish_uploads_unheardof_files(
tempdir: Path, file_pusher: FilePusher, dir_watcher: DirWatcher
):
f = tempdir / "my-file.txt"
write_with_mtime(f, b"content", mtime=0)
dir_watcher.finish()
file_pusher.file_changed.assert_called_once_with("my-file.txt", str(f), copy=False)
def test_dirwatcher_finish_skips_now_files(
tempdir: Path, file_pusher: FilePusher, dir_watcher: DirWatcher
):
f = tempdir / "my-file.txt"
dir_watcher.update_policy(str(f), "now")
write_with_mtime(f, b"content", mtime=0)
dir_watcher.finish()
file_pusher.file_changed.assert_not_called()
def test_dirwatcher_finish_uploads_end_files(
tempdir: Path, file_pusher: FilePusher, dir_watcher: DirWatcher
):
f = tempdir / "my-file.txt"
write_with_mtime(f, b"content", mtime=0)
dir_watcher.update_policy(str(f), "end")
dir_watcher.finish()
file_pusher.file_changed.assert_called_once_with("my-file.txt", str(f), copy=False)
@pytest.mark.parametrize("changed", [True, False])
def test_dirwatcher_finish_uploads_live_files_iff_changed(
tempdir: Path,
file_pusher: FilePusher,
dir_watcher: DirWatcher,
changed: bool,
):
f = tempdir / "my-file.txt"
write_with_mtime(f, b"content", mtime=0)
dir_watcher.update_policy(str(f), "live")
if changed:
write_with_mtime(f, b"new content", mtime=1)
file_pusher.file_changed.reset_mock()
dir_watcher.finish()
assert file_pusher.file_changed.called == changed
@pytest.mark.parametrize("ignore", [True, False])
def test_dirwatcher_finish_skips_ignoreglob_files(
tempdir: Path,
file_pusher: FilePusher,
dir_watcher: DirWatcher,
settings,
ignore: bool,
):
if ignore:
settings.ignore_globs = ["*.txt"]
f = tempdir / "my-file.txt"
write_with_mtime(f, b"content", mtime=0)
dir_watcher.update_policy(str(f), "end")
dir_watcher.finish()
assert file_pusher.file_changed.called == (not ignore)
@pytest.mark.skip(
reason="Live *should* take precedence over Now, I think, but I don't want to change the existing behavior yet"
)
def test_dirwatcher_prefers_live_policy_when_multiple_rules_match_file(
tempdir: Path, dir_watcher: DirWatcher
):
f = tempdir / "my-file.txt"
write_with_mtime(f, b"content", mtime=0)
dir_watcher.update_policy("*.txt", "live")
dir_watcher.update_policy("my-file.*", "end")
dir_watcher.update_policy("my-*.txt", "now")
assert isinstance(
dir_watcher._get_file_event_handler(str(f), "my-file.txt"), PolicyLive
)
@pytest.mark.skip(
reason="Surprisingly, this test fails. Do we want to change behavior to make it pass? TODO(spencerpearson)"
)
def METHOD_NAME(
tempdir: Path, dir_watcher: DirWatcher
):
f = tempdir / "my-file.txt"
write_with_mtime(f, b"content", mtime=0)
dir_watcher.update_policy("my-file.txt", "live")
assert isinstance(
dir_watcher._get_file_event_handler(str(f), "my-file.txt"), PolicyLive
)
dir_watcher.update_policy("my-file.txt", "end")
assert isinstance(
dir_watcher._get_file_event_handler(str(f), "my-file.txt"), PolicyEnd
)
def test_policylive_uploads_nonempty_unchanged_file_on_modified(
tempdir: Path, file_pusher: Mock
):
f = tempdir / "my-file.txt"
write_with_mtime(f, b"content", mtime=0)
policy = PolicyLive(str(f), f.name, file_pusher)
policy.on_modified()
file_pusher.file_changed.assert_called_once_with(f.name, str(f))
def test_policylive_ratelimits_modified_file_reupload(tempdir: Path, file_pusher: Mock):
elapsed = 0
with patch.object(time, "time", lambda: elapsed):
f = tempdir / "my-file.txt"
write_with_mtime(f, b"content", mtime=0)
policy = PolicyLive(str(f), f.name, file_pusher)
policy.on_modified()
threshold = max(
PolicyLive.RATE_LIMIT_SECONDS,
PolicyLive.min_wait_for_size(len(f.read_bytes())),
)
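        # A second upload within `threshold` seconds should be rate-limited;
        # one after the threshold should go through.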
file_pusher.reset_mock()
elapsed = threshold - 1
write_with_mtime(f, b"new content", mtime=elapsed)
policy.on_modified()
file_pusher.file_changed.assert_not_called()
elapsed = threshold + 1
write_with_mtime(f, b"new content", mtime=elapsed)
policy.on_modified()
file_pusher.file_changed.assert_called()
def test_policylive_forceuploads_on_finish(tempdir: Path, file_pusher: Mock):
elapsed = 0
with patch.object(time, "time", lambda: elapsed):
f = tempdir / "my-file.txt"
write_with_mtime(f, b"content", mtime=0)
policy = PolicyLive(str(f), f.name, file_pusher)
policy.on_modified()
file_pusher.reset_mock()
elapsed += 1
write_with_mtime(f, b"new content", mtime=elapsed)
policy.on_modified() # modifying the file shouldn't re-upload it because of the rate-limiting...
file_pusher.file_changed.assert_not_called()
policy.finish() # ...but finish() should force a re-upload
file_pusher.file_changed.assert_called()
def test_policynow_uploads_on_modified_iff_not_already_uploaded(
tempdir: Path, file_pusher: Mock
):
f = tempdir / "my-file.txt"
write_with_mtime(f, b"content", mtime=0)
policy = PolicyNow(str(f), f.name, file_pusher)
policy.on_modified()
file_pusher.file_changed.assert_called()
file_pusher.reset_mock()
write_with_mtime(f, b"content", mtime=99999)
policy.on_modified()
file_pusher.file_changed.assert_not_called() |
299,846 | test shuffle | import numpy as np
from keras_core import testing
from keras_core.utils import timeseries_dataset_utils
class TimeseriesDatasetTest(testing.TestCase):
def test_basics(self):
# Test ordering, targets, sequence length, batch size
data = np.arange(100)
targets = data * 2
dataset = timeseries_dataset_utils.timeseries_dataset_from_array(
data, targets, sequence_length=9, batch_size=5
)
# Expect 19 batches
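        # 100 points with sequence_length=9 give 92 windows; at batch_size=5
        # that is 18 full batches plus a final batch of 2.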
for i, batch in enumerate(dataset):
self.assertLen(batch, 2)
inputs, targets = batch
if i < 18:
self.assertEqual(inputs.shape, (5, 9))
if i == 18:
# Last batch: size 2
self.assertEqual(inputs.shape, (2, 9))
# Check target values
self.assertAllClose(targets, inputs[:, 0] * 2)
for j in range(min(5, len(inputs))):
# Check each sample in the batch
self.assertAllClose(
inputs[j], np.arange(i * 5 + j, i * 5 + j + 9)
)
def test_timeseries_regression(self):
# Test simple timeseries regression use case
data = np.arange(10)
offset = 3
targets = data[offset:]
dataset = timeseries_dataset_utils.timeseries_dataset_from_array(
data, targets, sequence_length=offset, batch_size=1
)
i = 0
for batch in dataset:
self.assertLen(batch, 2)
inputs, targets = batch
self.assertEqual(inputs.shape, (1, 3))
# Check values
self.assertAllClose(targets[0], data[offset + i])
self.assertAllClose(inputs[0], data[i : i + offset])
i += 1
self.assertEqual(i, 7) # Expect 7 batches
def test_no_targets(self):
data = np.arange(50)
dataset = timeseries_dataset_utils.timeseries_dataset_from_array(
data, None, sequence_length=10, batch_size=5
)
# Expect 9 batches
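        # 50 points with sequence_length=10 give 41 windows; at batch_size=5
        # that is 8 full batches plus a final batch of 1.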
i = None
for i, batch in enumerate(dataset):
if i < 8:
self.assertEqual(batch.shape, (5, 10))
elif i == 8:
self.assertEqual(batch.shape, (1, 10))
for j in range(min(5, len(batch))):
# Check each sample in the batch
self.assertAllClose(
batch[j], np.arange(i * 5 + j, i * 5 + j + 10)
)
self.assertEqual(i, 8)
def METHOD_NAME(self):
# Test cross-epoch random order and seed determinism
data = np.arange(10)
targets = data * 2
dataset = timeseries_dataset_utils.timeseries_dataset_from_array(
data,
targets,
sequence_length=5,
batch_size=1,
shuffle=True,
seed=123,
)
first_seq = None
for x, y in dataset.take(1):
self.assertNotAllClose(x, np.arange(0, 5))
self.assertAllClose(x[:, 0] * 2, y)
first_seq = x
# Check that a new iteration with the same dataset yields different
# results
for x, _ in dataset.take(1):
self.assertNotAllClose(x, first_seq)
        # Check determinism with the same seed
dataset = timeseries_dataset_utils.timeseries_dataset_from_array(
data,
targets,
sequence_length=5,
batch_size=1,
shuffle=True,
seed=123,
)
for x, _ in dataset.take(1):
self.assertAllClose(x, first_seq)
def test_sampling_rate(self):
data = np.arange(100)
targets = data * 2
dataset = timeseries_dataset_utils.timeseries_dataset_from_array(
data, targets, sequence_length=9, batch_size=5, sampling_rate=2
)
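        # With sampling_rate=2 each window spans 17 points, giving 84 windows:
        # 16 full batches plus a final batch of 4.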
for i, batch in enumerate(dataset):
self.assertLen(batch, 2)
inputs, targets = batch
if i < 16:
self.assertEqual(inputs.shape, (5, 9))
if i == 16:
# Last batch: size 4
self.assertEqual(inputs.shape, (4, 9))
# Check target values
self.assertAllClose(inputs[:, 0] * 2, targets)
for j in range(min(5, len(inputs))):
# Check each sample in the batch
start_index = i * 5 + j
end_index = start_index + 9 * 2
self.assertAllClose(
inputs[j], np.arange(start_index, end_index, 2)
)
def test_sequence_stride(self):
data = np.arange(100)
targets = data * 2
dataset = timeseries_dataset_utils.timeseries_dataset_from_array(
data, targets, sequence_length=9, batch_size=5, sequence_stride=3
)
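        # With sequence_stride=3 there are 31 windows: 6 full batches plus a
        # final batch of 1.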
for i, batch in enumerate(dataset):
self.assertLen(batch, 2)
inputs, targets = batch
if i < 6:
self.assertEqual(inputs.shape, (5, 9))
if i == 6:
# Last batch: size 1
self.assertEqual(inputs.shape, (1, 9))
# Check target values
self.assertAllClose(inputs[:, 0] * 2, targets)
for j in range(min(5, len(inputs))):
# Check each sample in the batch
start_index = i * 5 * 3 + j * 3
end_index = start_index + 9
self.assertAllClose(
inputs[j], np.arange(start_index, end_index)
)
def test_start_and_end_index(self):
data = np.arange(100)
dataset = timeseries_dataset_utils.timeseries_dataset_from_array(
data,
None,
sequence_length=9,
batch_size=5,
sequence_stride=3,
sampling_rate=2,
start_index=10,
end_index=90,
)
for batch in dataset:
self.assertLess(np.max(batch[0]), 90)
self.assertGreater(np.min(batch[0]), 9)
def test_errors(self):
# bad start index
with self.assertRaisesRegex(ValueError, "`start_index` must be "):
_ = timeseries_dataset_utils.timeseries_dataset_from_array(
np.arange(10), None, 3, start_index=-1
)
with self.assertRaisesRegex(ValueError, "`start_index` must be "):
_ = timeseries_dataset_utils.timeseries_dataset_from_array(
np.arange(10), None, 3, start_index=11
)
# bad end index
with self.assertRaisesRegex(ValueError, "`end_index` must be "):
_ = timeseries_dataset_utils.timeseries_dataset_from_array(
np.arange(10), None, 3, end_index=-1
)
with self.assertRaisesRegex(ValueError, "`end_index` must be "):
_ = timeseries_dataset_utils.timeseries_dataset_from_array(
np.arange(10), None, 3, end_index=11
)
# bad sampling_rate
with self.assertRaisesRegex(ValueError, "`sampling_rate` must be "):
_ = timeseries_dataset_utils.timeseries_dataset_from_array(
np.arange(10), None, 3, sampling_rate=0
)
# bad sequence stride
with self.assertRaisesRegex(ValueError, "`sequence_stride` must be "):
_ = timeseries_dataset_utils.timeseries_dataset_from_array(
np.arange(10), None, 3, sequence_stride=0
)
def test_not_batched(self):
data = np.arange(100)
dataset = timeseries_dataset_utils.timeseries_dataset_from_array(
data, None, sequence_length=9, batch_size=None, shuffle=True
)
sample = next(iter(dataset))
self.assertEqual(len(sample.shape), 1) |
299,847 | test max size rendition img tag | from unittest.mock import Mock, patch
from django.db import IntegrityError
from django.test import TestCase
from wagtail.images.models import Filter
from wagtail.images.tests.utils import get_test_image_file
from v1.models.images import CFGOVImage, CFGOVRendition
class CFGOVImageTest(TestCase):
def setUp(self):
def mock_with_name(name):
mock = Mock()
mock.configure_mock(name=name, url="https://url")
return mock
self.mock_gif = mock_with_name("test.gif")
self.mock_png = mock_with_name("test.png")
def test_no_renditions_by_default(self):
self.assertFalse(CFGOVRendition.objects.exists())
def test_original_rendition_calls_super_for_png(self):
image = CFGOVImage(file=self.mock_png, width=100, height=100)
with patch("v1.models.images.AbstractImage.get_rendition") as p:
image.get_rendition("original")
p.assert_called_once_with("original")
def test_original_rendition_makes_mock_rendition_for_gif(self):
image = CFGOVImage(file=self.mock_gif, width=100, height=100)
rendition = image.get_rendition("original")
self.assertEqual(rendition.image, image)
def test_non_resize_rendition_calls_super_for_png(self):
with patch("v1.models.images.AbstractImage.get_rendition") as p:
image = CFGOVImage(file=self.mock_png, width=100, height=100)
image.get_rendition("fill-200x200")
p.assert_called_once_with("fill-200x200")
def test_non_resize_rendition_raises_for_gif(self):
image = CFGOVImage(file=self.mock_gif, width=100, height=100)
with self.assertRaises(RuntimeError):
image.get_rendition("fill-200x200")
def test_image_original_rendition_size(self):
image = CFGOVImage(file=self.mock_gif, width=100, height=100)
rendition = image.get_rendition("original")
self.assertEqual(rendition.width, image.width)
self.assertEqual(rendition.height, image.height)
def test_image_original_filter_class(self):
image = CFGOVImage(file=self.mock_gif, width=100, height=100)
rendition_filter = Filter(spec="original")
rendition = image.get_rendition(rendition_filter)
self.assertEqual(rendition.file, image.file)
def test_image_original_rendition_img_tag(self):
image = CFGOVImage(file=self.mock_gif, width=100, height=100)
rendition = image.get_rendition("original")
self.assertEqual(
rendition.img_tag(),
'<img alt="" height="100" src="https://url" width="100">',
)
def test_max_size_rendition(self):
image = CFGOVImage(file=self.mock_gif, width=100, height=100)
rendition = image.get_rendition("max-165x165")
self.assertEqual(rendition.width, 100)
self.assertEqual(rendition.height, 100)
def METHOD_NAME(self):
mock_image = Mock(url="https://url")
image = CFGOVImage(file=mock_image, width=100, height=100)
rendition = image.get_rendition("max-165x165")
self.assertEqual(
rendition.img_tag(),
'<img alt="" height="100" src="https://url" width="100">',
)
def test_width_rendition_size(self):
image = CFGOVImage(file=self.mock_gif, width=500, height=300)
rendition = image.get_rendition("width-250")
self.assertEqual((rendition.width, rendition.height), (250, 150))
def test_width_rendition_img_tag(self):
image = CFGOVImage(file=self.mock_gif, width=500, height=300)
rendition = image.get_rendition("width-250")
self.assertEqual(
rendition.img_tag(),
'<img alt="" height="150" src="https://url" width="250">',
)
def test_twitter_card_large(self):
"""Twitter card property should be true if meta image is large"""
image = CFGOVImage(width=1200, height=600)
self.assertTrue(image.should_display_summary_large_image)
def test_twitter_card_small(self):
"""Twitter card property should be false if meta image is small"""
image = CFGOVImage(width=100, height=50)
self.assertFalse(image.should_display_summary_large_image)
def test_twitter_card_large_bad_ratio(self):
"""Twitter card property should be false if meta image ratio < 50%"""
image = CFGOVImage(width=1200, height=100)
self.assertFalse(image.should_display_summary_large_image)
class CFGOVRenditionTest(TestCase):
def test_uniqueness_constraint(self):
image = CFGOVImage.objects.create(
title="test", file=get_test_image_file()
)
filt = Filter(spec="original")
def create_rendition(image, filt):
return CFGOVRendition.objects.create(
filter_spec=filt.spec,
image=image,
file=image.file,
width=100,
height=100,
focal_point_key=filt.get_cache_key(image),
)
create_rendition(image=image, filt=filt)
with self.assertRaises(IntegrityError):
create_rendition(image=image, filt=filt) |
299,848 | test hint only | # -*- coding: utf-8 -*-
"""
Guidance hint test module.
"""
from tests.pyxform_test_case import PyxformTestCase
class GuidanceHintTest(PyxformTestCase):
"""Test guidance_hint XLSForms."""
def METHOD_NAME(self):
"""Test hint only column."""
self.assertPyxformXform(
name="data",
md="""
| survey | | | | |
| | type | name | label | hint |
| | string | name | Name | your name |
""",
xml__contains=["<hint>your name</hint>"],
)
def test_guidance_hint_and_label(self):
"""Test guidance_hint with label"""
self.assertPyxformXform(
name="data",
md="""
| survey | | | | |
| | type | name | label | guidance_hint |
| | string | name | Name | as shown on birth certificate|
""", # noqa
xml__contains=[
"<hint ref=\"jr:itext('/data/name:hint')\"/>",
'<value form="guidance">as shown on birth certificate</value>',
"<hint ref=\"jr:itext('/data/name:hint')\"/>",
],
run_odk_validate=True,
)
def test_hint_and_guidance_one_language(self): # pylint: disable=C0103
"""Test guidance_hint in one language."""
self.assertPyxformXform(
name="data",
md="""
| survey | | | | | |
| | type | name | label | hint | guidance_hint |
| | string | name | Name | your name | as shown on birth certificate|
""", # noqa
xml__contains=[
"<hint ref=\"jr:itext('/data/name:hint')\"/>",
"<value>your name</value>",
'<value form="guidance">as shown on birth certificate</value>',
],
)
def test_multi_language_guidance(self):
"""Test guidance_hint in multiple languages."""
self.assertPyxformXform(
name="data",
md="""
| survey | | | | | | |
| | type | name | label | hint | guidance_hint | guidance_hint::French (fr) |
| | string | name | Name | your name | as shown on birth certificate| comme sur le certificat de naissance|
""", # noqa
xml__contains=[
'<translation lang="French (fr)">',
'<value form="guidance">comme sur le certificat de naissance</value>', # noqa
'<translation default="true()" lang="default">',
'<value form="guidance">as shown on birth certificate</value>',
"<hint ref=\"jr:itext('/data/name:hint')\"/>",
],
)
def test_guidance_hint_only(self):
"""Test guidance_hint only."""
self.assertPyxformXform(
name="data",
md="""
| survey | | | |
| | type | name | guidance_hint |
| | string | name | as shown on birth certificate |
""",
errored=True,
error__contains=["The survey element named 'name' has no label or hint."],
)
def test_multi_language_guidance_only(self): # pylint:disable=C0103
"""Test guidance_hint only in multiple languages."""
self.assertPyxformXform(
name="data",
md="""
| survey | | | | |
| | type | name | guidance_hint | guidance_hint::French (fr) |
| | string | name | as shown on birth certificate| comme sur le certificat de naissance |
""", # noqa
errored=True,
error__contains=["The survey element named 'name' has no label or hint."],
)
def test_multi_language_hint(self):
"""Test hint in multiple languages."""
self.assertPyxformXform(
name="data",
md="""
| survey | | | | |
| | type | name | hint | hint::French (fr) |
| | string | name | default language hint| French hint |
""", # noqa
xml__contains=[
"<hint ref=\"jr:itext('/data/name:hint')\"/>",
"<value>French hint</value>",
"<value>default language hint</value>",
],
) |
299,849 | handle | import contextlib
import logging
import re
import shutil
import urllib.parse
from pathlib import Path
from shutil import make_archive
from bs4 import BeautifulSoup
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from django.test import Client, override_settings
from django.utils.timezone import override as override_timezone
from django_scopes import scope, scopes_disabled
from pretalx.common.signals import register_data_exporters
from pretalx.common.utils import rolledback_transaction
from pretalx.event.models import Event
@contextlib.contextmanager
def fake_admin(event):
with rolledback_transaction():
event.is_public = True
event.save()
client = Client()
def get(url):
try:
# Try getting the file from disk directly first, …
return get_mediastatic_content(url)
except FileNotFoundError:
# … then fall back to asking the views.
response = client.get(url, is_html_export=True, HTTP_ACCEPT="text/html")
content = get_content(response)
return content
yield get
def find_assets(html):
"""Find URLs of images, style sheets and scripts included in `html`."""
soup = BeautifulSoup(html, "lxml")
for asset in soup.find_all(["script", "img"]):
yield asset.attrs["src"]
for asset in soup.find_all(["link"]):
if asset.attrs["rel"][0] in ["icon", "stylesheet"]:
yield asset.attrs["href"]
def find_urls(css):
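    # Extract absolute url("...") references from a stylesheet so the linked
    # assets can be exported as well.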
return re.findall(r'url\("?(/[^")]+)"?\)', css.decode("utf-8"), re.IGNORECASE)
def event_talk_urls(event):
for talk in event.talks:
yield talk.urls.public
yield talk.urls.ical
for resource in talk.active_resources:
if resource.resource and resource.resource.url:
yield resource.resource.url
def event_speaker_urls(event):
for speaker in event.speakers:
profile = speaker.event_profile(event)
yield profile.urls.public
yield profile.urls.talks_ical
def event_exporter_urls(event):
for _, exporter in register_data_exporters.send(event):
if exporter.public:
yield exporter(event).urls.base
def schedule_version_urls(event):
for schedule in event.schedules.filter(version__isnull=False):
yield schedule.urls.public
yield schedule.urls.widget_data
yield schedule.urls.nojs
def event_urls(event):
yield event.urls.base
yield event.urls.schedule
yield event.urls.schedule_nojs
yield event.urls.widget_data
yield from schedule_version_urls(event)
yield event.urls.featured
yield event.urls.talks
yield from event_talk_urls(event)
yield event.urls.speakers
yield from event_speaker_urls(event)
yield from event_exporter_urls(event)
yield event.urls.changelog
yield event.urls.feed
def get_path(url):
return urllib.parse.urlparse(url).path
def get_content(response):
return (
b"".join(response.streaming_content) if response.streaming else response.content
)
def dump_content(destination, path, getter):
logging.debug(path)
content = getter(path)
if path.endswith("/"):
path = path + "index.html"
path = (Path(destination) / path.lstrip("/")).resolve()
    if Path(destination) not in path.parents:
raise CommandError("Path traversal detected, aborting.")
path.parent.mkdir(parents=True, exist_ok=True)
with open(path, "wb") as f:
f.write(content)
return content
def get_mediastatic_content(url):
if url.startswith(settings.STATIC_URL):
local_path = settings.STATIC_ROOT / url[len(settings.STATIC_URL) :]
elif url.startswith(settings.MEDIA_URL):
local_path = settings.MEDIA_ROOT / url[len(settings.MEDIA_URL) :]
else:
raise FileNotFoundError()
# Prevent directory traversal, make sure the path is inside the media or static root
local_path = local_path.resolve(strict=True)
if not any(
path in local_path.parents
for path in (settings.MEDIA_ROOT, settings.STATIC_ROOT)
):
raise FileNotFoundError()
with open(local_path, "rb") as f:
return f.read()
def export_event(event, destination):
with override_settings(
COMPRESS_ENABLED=True, COMPRESS_OFFLINE=True
), override_timezone(event.timezone):
with fake_admin(event) as get:
logging.info("Collecting URLs for export")
urls = [*event_urls(event)]
assets = set()
logging.info(f"Exporting {len(urls)} pages")
for url in map(get_path, urls):
content = dump_content(destination, url, get)
assets |= set(map(get_path, find_assets(content)))
css_assets = set()
logging.info(f"Exporting {len(assets)} static files from HTML links")
for url in assets:
content = dump_content(destination, url, get)
if url.endswith(".css"):
css_assets |= set(find_urls(content))
logging.info(f"Exporting {len(css_assets)} files from CSS links")
for url_path in (get_path(urllib.parse.unquote(url)) for url in css_assets):
dump_content(destination, url_path, get)
def delete_directory(path):
with contextlib.suppress(FileNotFoundError):
shutil.rmtree(path)
def get_export_path(event):
return settings.HTMLEXPORT_ROOT / event.slug
def get_export_zip_path(event):
return get_export_path(event).with_suffix(".zip")
class Command(BaseCommand):
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument("event", type=str)
parser.add_argument("--zip", action="store_true")
def METHOD_NAME(self, *args, **options):
event_slug = options.get("event")
with scopes_disabled():
try:
event = Event.objects.get(slug__iexact=event_slug)
except Event.DoesNotExist:
raise CommandError(f'Could not find event with slug "{event_slug}".')
with scope(event=event):
logging.info(f"Exporting {event.name}")
export_dir = get_export_path(event)
zip_path = get_export_zip_path(event)
tmp_dir = export_dir.with_name(export_dir.name + "-new")
delete_directory(tmp_dir)
tmp_dir.mkdir()
try:
export_event(event, tmp_dir)
delete_directory(export_dir)
tmp_dir.rename(export_dir)
finally:
delete_directory(tmp_dir)
logging.info(f"Exported to {export_dir}")
if options.get("zip"):
make_archive(
root_dir=settings.HTMLEXPORT_ROOT,
base_dir=event.slug,
base_name=zip_path.parent / zip_path.stem,
format="zip",
)
logging.info(f"Exported to {zip_path}") |
299,850 | test read endpoint config with cafile | import logging
from pathlib import Path
from typing import Text, Optional, Union
from unittest.mock import Mock
import pytest
from aioresponses import aioresponses
from rasa.shared.exceptions import FileNotFoundException
from tests.utilities import latest_request, json_of_latest_request
import rasa.utils.endpoints as endpoint_utils
@pytest.mark.parametrize(
"base, subpath, expected_result",
[
("https://example.com", None, "https://example.com"),
("https://example.com/test", None, "https://example.com/test"),
("https://example.com/", None, "https://example.com/"),
("https://example.com/", "test", "https://example.com/test"),
("https://example.com/", "test/", "https://example.com/test/"),
(
"http://duckling.rasa.com:8000",
"/parse",
"http://duckling.rasa.com:8000/parse",
),
(
"http://duckling.rasa.com:8000/",
"/parse",
"http://duckling.rasa.com:8000/parse",
),
],
)
def test_concat_url(base, subpath, expected_result):
assert endpoint_utils.concat_url(base, subpath) == expected_result
def test_warning_for_base_paths_with_trailing_slash(caplog):
test_path = "base/"
with caplog.at_level(logging.DEBUG, logger="rasa.utils.endpoints"):
assert endpoint_utils.concat_url(test_path, None) == test_path
assert len(caplog.records) == 1
async def test_endpoint_config():
with aioresponses() as mocked:
endpoint = endpoint_utils.EndpointConfig(
"https://example.com/",
params={"A": "B"},
headers={"X-Powered-By": "Rasa"},
basic_auth={"username": "user", "password": "pass"},
token="mytoken",
token_name="letoken",
type="redis",
port=6379,
db=0,
password="password",
timeout=30000,
)
mocked.post(
"https://example.com/test?A=B&P=1&letoken=mytoken",
payload={"ok": True},
repeat=True,
status=200,
)
await endpoint.request(
"post",
subpath="test",
content_type="application/text",
json={"c": "d"},
params={"P": "1"},
)
r = latest_request(
mocked, "post", "https://example.com/test?A=B&P=1&letoken=mytoken"
)
assert r
assert json_of_latest_request(r) == {"c": "d"}
assert r[-1].kwargs.get("params", {}).get("A") == "B"
assert r[-1].kwargs.get("params", {}).get("P") == "1"
assert r[-1].kwargs.get("params", {}).get("letoken") == "mytoken"
# unfortunately, the mock library won't report any headers stored on
# the session object, so we need to verify them separately
async with endpoint.session() as s:
assert s._default_headers.get("X-Powered-By") == "Rasa"
assert s._default_auth.login == "user"
assert s._default_auth.password == "pass"
async def test_endpoint_config_with_cafile(tmp_path: Path):
cafile = "data/test_endpoints/cert.pem"
with aioresponses() as mocked:
endpoint = endpoint_utils.EndpointConfig(
"https://example.com/", cafile=str(cafile)
)
mocked.post("https://example.com/", status=200)
await endpoint.request("post")
request = latest_request(mocked, "post", "https://example.com/")[-1]
ssl_context = request.kwargs["ssl"]
certs = ssl_context.get_ca_certs()
assert certs[0]["subject"][4][0] == ("organizationalUnitName", "rasa")
async def test_endpoint_config_with_non_existent_cafile(tmp_path: Path):
cafile = "data/test_endpoints/no_file.pem"
endpoint = endpoint_utils.EndpointConfig("https://example.com/", cafile=str(cafile))
with pytest.raises(FileNotFoundException):
await endpoint.request("post")
def test_endpoint_config_default_token_name():
test_data = {"url": "http://test", "token": "token"}
actual = endpoint_utils.EndpointConfig.from_dict(test_data)
assert actual.token_name == "token"
def test_endpoint_config_custom_token_name():
test_data = {"url": "http://test", "token": "token", "token_name": "test_token"}
actual = endpoint_utils.EndpointConfig.from_dict(test_data)
assert actual.token_name == "test_token"
async def test_request_non_json_response():
with aioresponses() as mocked:
endpoint = endpoint_utils.EndpointConfig("https://example.com/")
mocked.post(
"https://example.com/test",
payload="ok",
content_type="application/text",
status=200,
)
response = await endpoint.request("post", subpath="test")
assert not response
@pytest.mark.parametrize(
"filename, endpoint_type",
[("data/test_endpoints/example_endpoints.yml", "tracker_store")],
)
def test_read_endpoint_config(filename: Text, endpoint_type: Text):
conf = endpoint_utils.read_endpoint_config(filename, endpoint_type)
assert isinstance(conf, endpoint_utils.EndpointConfig)
@pytest.mark.parametrize(
"endpoint_type, cafile",
[("action_endpoint", "./some_test_file"), ("tracker_store", None)],
)
def METHOD_NAME(endpoint_type: Text, cafile: Optional[Text]):
conf = endpoint_utils.read_endpoint_config(
"data/test_endpoints/example_endpoints.yml", endpoint_type
)
assert conf.cafile == cafile
@pytest.mark.parametrize(
"filename, endpoint_type",
[
("", "tracker_store"),
("data/test_endpoints/example_endpoints.yml", "stuff"),
("data/test_endpoints/example_endpoints.yml", "empty"),
("/unknown/path.yml", "tracker_store"),
],
)
def test_read_endpoint_config_not_found(filename: Text, endpoint_type: Text):
conf = endpoint_utils.read_endpoint_config(filename, endpoint_type)
assert conf is None
@pytest.mark.parametrize(
"value, default, expected_result",
[
(None, True, True),
(False, True, False),
("false", True, False),
("true", False, True),
],
)
def test_bool_arg(
value: Optional[Union[bool, str]], default: bool, expected_result: bool
):
request = Mock()
request.args = {}
if value is not None:
request.args = {"key": value}
assert endpoint_utils.bool_arg(request, "key", default) == expected_result
@pytest.mark.parametrize(
"value, default, expected_result",
[(None, 0.5, 0.5), (0.5, None, 0.5), ("0.5", 0, 0.5), ("a", 0.5, 0.5)],
)
def test_float_arg(
value: Optional[Union[float, str]], default: float, expected_result: float
):
request = Mock()
request.args = {}
if value is not None:
request.args = {"key": value}
assert endpoint_utils.float_arg(request, "key", default) == expected_result
@pytest.mark.parametrize(
"value, default, expected_result",
[(None, 0, 0), (1, 0, 1), ("1", 0, 1), ("a", 0, 0)],
)
def test_int_arg(value: Optional[Union[int, str]], default: int, expected_result: int):
request = Mock()
request.args = {}
if value is not None:
request.args = {"key": value}
assert endpoint_utils.int_arg(request, "key", default) == expected_result |
299,851 | group ids | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetPrivateEndpointConnectionResult',
'AwaitableGetPrivateEndpointConnectionResult',
'get_private_endpoint_connection',
'get_private_endpoint_connection_output',
]
@pulumi.output_type
class GetPrivateEndpointConnectionResult:
"""
The Private Endpoint Connection resource.
"""
def __init__(__self__, METHOD_NAME=None, id=None, name=None, private_endpoint=None, private_link_service_connection_state=None, provisioning_state=None, type=None):
if METHOD_NAME and not isinstance(METHOD_NAME, list):
raise TypeError("Expected argument 'group_ids' to be a list")
pulumi.set(__self__, "group_ids", METHOD_NAME)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if private_endpoint and not isinstance(private_endpoint, dict):
raise TypeError("Expected argument 'private_endpoint' to be a dict")
pulumi.set(__self__, "private_endpoint", private_endpoint)
if private_link_service_connection_state and not isinstance(private_link_service_connection_state, dict):
raise TypeError("Expected argument 'private_link_service_connection_state' to be a dict")
pulumi.set(__self__, "private_link_service_connection_state", private_link_service_connection_state)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="groupIds")
def METHOD_NAME(self) -> Optional[Sequence[str]]:
"""
        The group ids for the private endpoint resource.
"""
return pulumi.get(self, "group_ids")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="privateEndpoint")
def private_endpoint(self) -> Optional['outputs.PrivateEndpointResponse']:
"""
The resource of private end point.
"""
return pulumi.get(self, "private_endpoint")
@property
@pulumi.getter(name="privateLinkServiceConnectionState")
def private_link_service_connection_state(self) -> 'outputs.PrivateLinkServiceConnectionStateResponse':
"""
A collection of information about the state of the connection between service consumer and provider.
"""
return pulumi.get(self, "private_link_service_connection_state")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
Provisioning state of the private endpoint connection.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetPrivateEndpointConnectionResult(GetPrivateEndpointConnectionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetPrivateEndpointConnectionResult(
METHOD_NAME=self.METHOD_NAME,
id=self.id,
name=self.name,
private_endpoint=self.private_endpoint,
private_link_service_connection_state=self.private_link_service_connection_state,
provisioning_state=self.provisioning_state,
type=self.type)
def get_private_endpoint_connection(environment_name: Optional[str] = None,
private_endpoint_connection_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetPrivateEndpointConnectionResult:
"""
Gets the details of the private endpoint connection of the environment in the given resource group.
Azure REST API version: 2021-03-31-preview.
:param str environment_name: The name of the Time Series Insights environment associated with the specified resource group.
:param str private_endpoint_connection_name: The name of the private endpoint connection associated with the Azure resource
:param str resource_group_name: Name of an Azure Resource group.
"""
__args__ = dict()
__args__['environmentName'] = environment_name
__args__['privateEndpointConnectionName'] = private_endpoint_connection_name
__args__['resourceGroupName'] = resource_group_name
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('azure-native:timeseriesinsights:getPrivateEndpointConnection', __args__, opts=opts, typ=GetPrivateEndpointConnectionResult).value
return AwaitableGetPrivateEndpointConnectionResult(
METHOD_NAME=pulumi.get(__ret__, 'group_ids'),
id=pulumi.get(__ret__, 'id'),
name=pulumi.get(__ret__, 'name'),
private_endpoint=pulumi.get(__ret__, 'private_endpoint'),
private_link_service_connection_state=pulumi.get(__ret__, 'private_link_service_connection_state'),
provisioning_state=pulumi.get(__ret__, 'provisioning_state'),
type=pulumi.get(__ret__, 'type'))
@_utilities.lift_output_func(get_private_endpoint_connection)
def get_private_endpoint_connection_output(environment_name: Optional[pulumi.Input[str]] = None,
private_endpoint_connection_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetPrivateEndpointConnectionResult]:
"""
Gets the details of the private endpoint connection of the environment in the given resource group.
Azure REST API version: 2021-03-31-preview.
:param str environment_name: The name of the Time Series Insights environment associated with the specified resource group.
:param str private_endpoint_connection_name: The name of the private endpoint connection associated with the Azure resource
:param str resource_group_name: Name of an Azure Resource group.
"""
... |
299,852 | is available | # Copyright (c) 2021 Horizon Robotics and ALF Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gym
import alf
from alf.environments.dmc_gym_wrapper import DMCGYMWrapper, dm_control
from alf.environments.suite_gym import wrap_env
def METHOD_NAME():
"""
Check if the required environment is installed.
"""
return dm_control is not None
@alf.configurable
def load(environment_name='cheetah:run',
from_pixels=True,
image_size=100,
env_id=None,
discount=1.0,
visualize_reward=False,
max_episode_steps=1000,
control_timestep=None,
gym_env_wrappers=(),
alf_env_wrappers=()):
""" Load a MuJoCo environment.
For installation of DMControl, see https://github.com/deepmind/dm_control.
For installation of MuJoCo210, see https://mujoco.org.
Args:
environment_name (str): this string must have the format
"domain_name:task_name", where "domain_name" is defined by DM control as
the physical model name, and "task_name" is an instance of the model
            with a particular MDP structure.
from_pixels (boolean): Output image if set to True.
image_size (int): The height and width of the output
image from the environment.
env_id (int): (optional) ID of the environment.
discount (float): Discount to use for the environment.
visualize_reward: if True, then the rendered frame will have
a highlighted color when the agent achieves a reward.
max_episode_steps (int): The maximum episode step in the environment.
control_timestep (float): the time duration between two agent actions. If
this is greater than the agent's primitive physics timestep, then
multiple physics simulation steps might be performed between two actions.
The difference between multi-physics steps and "action repeats"/FrameSkip
is that the intermediate physics step won't need to render an observation
(which might save time if rendering is costly). However, this also
means that unlike "action repeats"/FrameSkip which accumulates rewards
of several repeated steps, only a single-step reward is obtained after
all the physics simulation steps are done. The total number of
physics simulation steps in an episode is
``control_timestep / physics_timestep * frame_skip * max_episode_steps``.
If None, the default control timstep defined by DM control suite will
be used.
gym_env_wrappers (Iterable): Iterable with references to gym_wrappers
classes to use directly on the gym environment.
alf_env_wrappers (Iterable): Iterable with references to alf_wrappers
classes to use on the ALF environment. There will be an
AlfEnvironmentDMC2GYMWrapper added before any alf_wrappers.
Returns:
A wrapped AlfEnvironment
"""
names = environment_name.split(":")
assert len(names) == 2, (
"environment_name must be in the format 'domain_name:task_name'!"
f" Provided environment_name: {environment_name}")
domain_name, task_name = names
gym_env = DMCGYMWrapper(
domain_name=domain_name,
task_name=task_name,
visualize_reward=visualize_reward,
from_pixels=from_pixels,
control_timestep=control_timestep,
height=image_size,
width=image_size)
return wrap_env(
gym_env,
env_id=env_id,
discount=discount,
max_episode_steps=max_episode_steps,
gym_env_wrappers=gym_env_wrappers,
alf_env_wrappers=alf_env_wrappers,
image_channel_first=False) |
299,853 | test user profile updates | # -*- coding: utf-8 -*-
#
# RERO ILS
# Copyright (C) 2022-2023 RERO
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Users profile updates tests."""
from __future__ import absolute_import, print_function
import json
from flask import url_for
from invenio_accounts.testutils import login_user_via_session
from rero_ils.modules.patrons.api import Patron
from rero_ils.modules.patrons.models import CommunicationChannel
from rero_ils.modules.users.api import User
def METHOD_NAME(
client, patron_martigny, system_librarian_martigny, json_header,
mailbox):
"""Test users profile updates."""
# login with a patron has only the patron role, this means we are logging
# into the public interface
assert patron_martigny.patron['communication_channel'] == \
CommunicationChannel.MAIL
login_user_via_session(client, patron_martigny.user)
# mailbox is empty
assert not (len(mailbox))
user_metadata = User.get_record(patron_martigny.user.id).dumps_metadata()
# changing the email by another does not send any reset_password
# notification
user_metadata['email'] = 'toto@toto.com'
res = client.put(
url_for('api_users.users_item', id=patron_martigny.user.id),
data=json.dumps(user_metadata),
headers=json_header
)
assert res.status_code == 200
assert not (len(mailbox))
patron_martigny = Patron.get_record_by_pid(patron_martigny.pid)
    # an email was added to the patron, so communication_channel will change
# automatically to email
assert patron_martigny.patron.get('communication_channel') == \
CommunicationChannel.EMAIL
# removing the email from profile does not send any reset_password
# notification
user_metadata.pop('email', None)
res = client.put(
url_for(
'api_users.users_item',
id=patron_martigny.user.id),
data=json.dumps(user_metadata),
headers=json_header
)
assert res.status_code == 200
assert not (len(mailbox))
# the corresponding patron changes its communication_channel to mail
    # automatically if the user has no email configured and the patron has no
# additional_communication_email configured
patron_martigny = Patron.get_record_by_pid(patron_martigny.pid)
assert patron_martigny.patron.get('communication_channel') == \
CommunicationChannel.MAIL
# login as a system_librarian this means we are logging into the
# professional interface
login_user_via_session(client, system_librarian_martigny.user)
# adding an email to a profile does not send any reset_password
# notification
user_metadata['email'] = 'toto@toto.com'
res = client.put(
url_for(
'api_users.users_item',
id=patron_martigny.user.id),
data=json.dumps(user_metadata),
headers=json_header
)
assert res.status_code == 200
assert not (len(mailbox))
# removing the email from profile does not send any reset_password
# notification
user_metadata.pop('email', None)
res = client.put(
url_for(
'api_users.users_item',
id=patron_martigny.user.id),
data=json.dumps(user_metadata),
headers=json_header
)
assert res.status_code == 200
assert not (len(mailbox))
patron_martigny = Patron.get_record_by_pid(patron_martigny.pid)
assert patron_martigny.patron.get('communication_channel') == \
CommunicationChannel.MAIL
def test_user_birthdate(
client, patron_martigny, system_librarian_martigny, json_header):
"""Test user birth_date."""
login_user_via_session(client, system_librarian_martigny.user)
user_metadata = User.get_record(patron_martigny.user.id).dumps_metadata()
# Invalid date of birth
user_metadata['birth_date'] = '0070-01-01'
res = client.put(
url_for('api_users.users_item', id=patron_martigny.user.id),
data=json.dumps(user_metadata),
headers=json_header
)
assert res.status_code == 400
# Valid date of birth
user_metadata['birth_date'] = '1970-01-01'
res = client.put(
url_for('api_users.users_item', id=patron_martigny.user.id),
data=json.dumps(user_metadata),
headers=json_header
)
assert res.status_code == 200
user_metadata['birth_date'] = '2001-01-01'
res = client.put(
url_for('api_users.users_item', id=patron_martigny.user.id),
data=json.dumps(user_metadata),
headers=json_header
)
assert res.status_code == 200 |
299,854 | collater | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from . import FairseqDataset
class TransformEosDataset(FairseqDataset):
"""A :class:`~fairseq.data.FairseqDataset` wrapper that appends/prepends/strips EOS.
Note that the transformation is applied in :func:`collater`.
Args:
dataset (~fairseq.data.FairseqDataset): dataset to wrap
eos (int): index of the end-of-sentence symbol
append_eos_to_src (bool, optional): append EOS to the end of src
remove_eos_from_src (bool, optional): remove EOS from the end of src
append_eos_to_tgt (bool, optional): append EOS to the end of tgt
remove_eos_from_tgt (bool, optional): remove EOS from the end of tgt
"""
def __init__(
self,
dataset,
eos,
append_eos_to_src=False,
remove_eos_from_src=False,
append_eos_to_tgt=False,
remove_eos_from_tgt=False,
has_target=True,
):
if not isinstance(dataset, FairseqDataset):
raise ValueError("dataset must be an instance of FairseqDataset")
if append_eos_to_src and remove_eos_from_src:
raise ValueError("cannot combine append_eos_to_src and remove_eos_from_src")
if append_eos_to_tgt and remove_eos_from_tgt:
raise ValueError("cannot combine append_eos_to_tgt and remove_eos_from_tgt")
self.dataset = dataset
self.eos = torch.LongTensor([eos])
self.append_eos_to_src = append_eos_to_src
self.remove_eos_from_src = remove_eos_from_src
self.append_eos_to_tgt = append_eos_to_tgt
self.remove_eos_from_tgt = remove_eos_from_tgt
self.has_target = has_target
# precompute how we should adjust the reported sizes
self._src_delta = 0
self._src_delta += 1 if append_eos_to_src else 0
self._src_delta -= 1 if remove_eos_from_src else 0
self._tgt_delta = 0
self._tgt_delta += 1 if append_eos_to_tgt else 0
self._tgt_delta -= 1 if remove_eos_from_tgt else 0
self._checked_src = False
self._checked_tgt = False
def _check_src(self, src, expect_eos):
if not self._checked_src:
assert (src[-1] == self.eos[0]) == expect_eos
self._checked_src = True
def _check_tgt(self, tgt, expect_eos):
if self.has_target and not self._checked_tgt:
assert (tgt[-1] == self.eos[0]) == expect_eos
self._checked_tgt = True
def __getitem__(self, index):
return self.dataset[index]
def __len__(self):
return len(self.dataset)
def METHOD_NAME(self, samples):
def transform(item):
if self.append_eos_to_src:
self.eos = self.eos.to(device=item["source"].device)
self._check_src(item["source"], expect_eos=False)
item["source"] = torch.cat([item["source"], self.eos])
if self.remove_eos_from_src:
self.eos = self.eos.to(device=item["source"].device)
self._check_src(item["source"], expect_eos=True)
item["source"] = item["source"][:-1]
if self.append_eos_to_tgt:
self.eos = self.eos.to(device=item["target"].device)
self._check_tgt(item["target"], expect_eos=False)
item["target"] = torch.cat([item["target"], self.eos])
if self.remove_eos_from_tgt:
self.eos = self.eos.to(device=item["target"].device)
self._check_tgt(item["target"], expect_eos=True)
item["target"] = item["target"][:-1]
return item
samples = list(map(transform, samples))
return self.dataset.METHOD_NAME(samples)
def num_tokens(self, index):
return self.dataset.num_tokens(index)
def size(self, index):
if self.has_target:
src_len, tgt_len = self.dataset.size(index)
return (src_len + self._src_delta, tgt_len + self._tgt_delta)
else:
return self.dataset.size(index)
def ordered_indices(self):
# NOTE: we assume that the ordering does not change based on the
# addition or removal of eos
return self.dataset.ordered_indices()
@property
def supports_prefetch(self):
return getattr(self.dataset, "supports_prefetch", False)
def prefetch(self, indices):
return self.dataset.prefetch(indices) |
299,855 | increase preop step | from typing import Any, Dict, List, Optional
import torch
from .param_runtime_order import OrderedParamGenerator
class MemStats(object):
def __init__(self) -> None:
"""
Store the non model data statistics used for Gemini and GeminiOptimizer.
"""
# (preop_step, List[param])
self._step_param_dict = dict()
# (param, List[preop_step])
self._param_step_dict = dict()
# (preop_step, non_model_data) non model data used during preop_step ~ (preop_step+1)
self._step_nmd_dict = dict()
self._param_runtime_order = OrderedParamGenerator()
self._preop_step = 0
self._prev_overall_cuda = -1
self._max_overall_cuda = 0
self._prev_md_cuda = -1
# old version
self._model_data_cuda_list = []
self._model_data_cpu_list = []
self._overall_cuda_list = []
self._overall_cpu_list = []
self._non_model_data_cuda_list = []
self._non_model_data_cpu_list = []
def calc_max_cuda_non_model_data(self):
if self._prev_overall_cuda != -1 and self._prev_md_cuda != -1:
max_cuda_non_model_data = self._prev_overall_cuda - self._prev_md_cuda
self._step_nmd_dict[self._preop_step - 1] = max_cuda_non_model_data
# compatibility of the old version.
self._non_model_data_cuda_list.append(max_cuda_non_model_data)
def record_max_cuda_model_data(self, val):
self._prev_md_cuda = val
def record_max_cuda_overall_data(self, val):
self._prev_overall_cuda = val
self._max_overall_cuda = max(self._max_overall_cuda, val)
@property
def max_overall_cuda(self):
return self._max_overall_cuda
def METHOD_NAME(self, param_list: List[torch.nn.Parameter]):
"""
        Increase the time step. The param list is used between the current and
        the next time step.
Args:
param_list (List[torch.nn.Parameter]): a list of torch parameters.
"""
for p in param_list:
if p not in self._param_step_dict:
self._param_step_dict[p] = [self._preop_step]
else:
self._param_step_dict[p].append(self._preop_step)
self._param_runtime_order.append(p)
self._step_param_dict[self._preop_step] = param_list
self._preop_step += 1
def param_used_step(self, param: torch.nn.Parameter) -> Optional[List[int]]:
"""param_used_step
        Get the list of time steps at which the given param was used.
Args:
param (torch.nn.Parameter): a torch param
Returns:
Optional[List[int]]: a list of int indicates the time step of preop hook.
"""
if param not in self._param_step_dict:
return None
else:
return self._param_step_dict[param]
def param_order(self):
if self._param_runtime_order.is_empty():
raise RuntimeError
else:
return self._param_runtime_order
def non_model_data_list(self, device_type: str) -> List[int]:
if device_type == 'cuda':
return self._non_model_data_cuda_list
elif device_type == 'cpu':
return self._non_model_data_cpu_list
else:
raise TypeError
def max_non_model_data(self, device_type: str) -> float:
if device_type == 'cuda':
return max(self._non_model_data_cuda_list)
elif device_type == 'cpu':
return max(self._non_model_data_cpu_list)
else:
raise TypeError
def clear(self):
self._model_data_cuda_list = []
self._overall_cuda_list = []
self._model_data_cpu_list = []
self._overall_cpu_list = []
self._non_model_data_cpu_list = []
self._non_model_data_cuda_list = []
self._param_runtime_order.clear()
self._step_param_dict.clear()
self._param_step_dict.clear()
self._step_nmd_dict.clear()
self._preop_step = 0
self._prev_overall_cuda = -1
self._prev_md_cuda = -1 |
299,856 | main | #!/usr/bin/env python3
# Copyright (c) 2021, 2022, Oracle and/or its affiliates.
# Copyright (C) 1996-2021 Python Software Foundation
#
# Licensed under the PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
"""Takes two grammar files and diffs them"""
import sys
import re
import tokenize
from pegen.grammar import Alt, Grammar, GrammarVisitor, Rule
from pegen.grammar_parser import GeneratedParser as GrammarParser
from pegen.tokenizer import Tokenizer
class DiffVisitor(GrammarVisitor):
OLD_ESCAPE = "\033[9m\033[31m"
NEW_ESCAPE = "\033[32m"
NORMAL_ESCAPE = "\033[0m"
FUNCCALL_REGEXP = re.compile(r"(?:\( [a-zA-Z0-9_]+ \*\))?((?:new )?[a-zA-Z0-9_\.]+(?:\[\])?)(?: {|\().*")
def __init__(self, grammar1: Grammar, grammar2: Grammar):
self.grammar1, self.grammar2 = grammar1, grammar2
@classmethod
def old(cls, text: str):
return f"{cls.OLD_ESCAPE}{text}{cls.NORMAL_ESCAPE}"
@classmethod
def new(cls, text: str):
return f"{cls.NEW_ESCAPE}{text}{cls.NORMAL_ESCAPE}"
def diff(self):
self.rules = rules1 = {}
self.visit(self.grammar1)
del self.rules
self.rules = rules2 = {}
self.visit(self.grammar2)
del self.rules
rules_added = []
rules_removed = []
for rulename in rules1:
if rulename not in rules2:
rules_removed.append(rulename)
for rulename in rules2:
if rulename not in rules1:
rules_added.append(rulename)
rules_diff = []
replacement_functions = {}
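        # Track which action functions in the old grammar were replaced by
        # which functions in the new grammar.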
for rulename, (type, actions) in rules1.items():
if rulename in rules2:
new_type, new_actions = rules2[rulename]
if type != new_type or actions != new_actions:
rules_diff.append(rulename)
for pattern,old_action in actions.items():
new_action = new_actions.get(pattern)
if new_action and new_action != old_action and (m1 := self.FUNCCALL_REGEXP.match(old_action)) and (m2 := self.FUNCCALL_REGEXP.match(new_action)):
replacement_functions.setdefault(m1.group(1), set()).add(m2.group(1))
if rules_added:
print(f"== Rules added [{len(rules_added)}]\n")
for rulename in rules_added:
new_type,new_actions = rules2[rulename]
print(f"\t{rulename}[{new_type}]")
for pattern,action in new_actions.items():
print(f"\t\t| {pattern} {{{action}}}")
print()
if rules_removed:
print(f"== Rules removed [{len(rules_removed)}]\n")
for rulename in rules_removed:
old_type,old_actions = rules1[rulename]
print(f"\t{rulename}[{old_type}]")
for pattern,action in old_actions.items():
print(f"\t\t| {pattern} {{{action}}}")
print()
if rules_diff:
print(f"== Rule differences [{len(rules_diff)}]\n")
for rulename in rules_diff:
old_type,old_actions = rules1[rulename]
new_type,new_actions = rules2[rulename]
print(f"\t{rulename}", end="")
if old_type != new_type:
print(f"[{self.old(old_type)}{self.new(new_type)}]")
else:
print(f"[{old_type}]")
for pattern,old_action in old_actions.items():
print(f"\t\t| ", end="")
if pattern in new_actions:
print(pattern, end=" ")
new_action = new_actions[pattern]
if old_action != new_action:
print(f"{{{self.old(old_action)}{self.new(new_action)}}}")
else:
print(f"{{{old_action}}}")
else:
print(self.old(f"{pattern} {{{old_action}}}"))
for pattern,new_action in new_actions.items():
if pattern not in old_actions:
print(self.new(f"\t\t| {pattern} {{{new_action}}}"))
print()
unchanged_rules = set(rules1.keys()) - set(rules_diff) - set(rules_removed)
if unchanged_rules:
print(f"== Unchanged rules [{len(unchanged_rules)}]")
print("\n\t", "\n\t".join(sorted(unchanged_rules)), "\n", sep="")
if replacement_functions:
print(f"== Typical replacement functions\n")
for old,new in sorted(replacement_functions.items()):
print(f"\t{old}", "->", self.new(", ".join(new)))
print()
def visit_Rule(self, node: Rule):
self.actions = {}
self.visit(node.rhs)
self.rules[node.name] = (node.type, self.actions)
del self.actions
def visit_Alt(self, node: Alt):
action = re.sub(r" ([\.,\(\)\[\]]) ", r"\1", str(node.action)) # shorten action string
self.actions[" ".join(str(item) for item in node.items)] = action
self.generic_visit(node)
def METHOD_NAME():
if len(sys.argv) == 3:
grammar_files = map(lambda f: open(f), sys.argv[1:])
elif len(sys.argv) == 2 and not sys.stdin.isatty():
grammar_files = [sys.stdin, open(sys.argv[1])]
else:
sys.exit("\n".join([
"Usage:",
f"\t\t{sys.argv[0]} GRAMMAR_FILE_OLD GRAMMAR_FILE_NEW",
"\tor",
f"\t\tcat GRAMMAR_FILE_OLD | {sys.argv[0]} GRAMMAR_FILE_NEW"
]))
grammars = []
for grammar_file in grammar_files:
with grammar_file as file:
tokenizer = Tokenizer(tokenize.generate_tokens(file.readline))
parser = GrammarParser(tokenizer)
grammar = parser.start()
if not grammar:
sys.exit(f"Failed to parse {grammar_file}")
grammars.append(grammar)
DiffVisitor(*grammars).diff()
if __name__ == "__main__":
METHOD_NAME() |
299,857 | test categorical is in range | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for multinomial generation ops in the XLA JIT compiler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import stateless_random_ops
from tensorflow.python.platform import googletest
# TODO(srvasude): Merge this with
# third_party/tensorflow/python/kernel_tests/random/multinomial_op_test.py.
class CategoricalTest(xla_test.XLATestCase):
"""Test cases for random-number generating operators."""
def output_dtypes(self):
return set(self.int_types).intersection([np.int32, np.int64])
def _chi2(self, expected, actual):
"""Returns Chi2 GOF statistic."""
actual = np.asarray(actual)
expected = np.asarray(expected)
diff = actual - expected
chi2 = np.sum(diff * diff / expected)
return chi2
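  # Worked example (hypothetical numbers): with expected = [0.5, 0.5] and
  # actual = [0.48, 0.52], diff is [-0.02, 0.02] and the statistic is
  # 0.0004 / 0.5 + 0.0004 / 0.5 = 0.0016, i.e. a very close fit.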
def _do_sampling(self, logits, num_samples):
"""Categorical samples from given input.
Args:
logits: Numpy ndarray of shape [batch_size, num_classes].
num_samples: Int; number of samples to draw.
Returns:
Frequencies from sampled classes; shape [batch_size, num_classes].
"""
with self.session(), self.test_scope():
random_seed.set_random_seed(1618)
op = random_ops.multinomial(logits, num_samples,
output_dtype=dtypes.int32)
d = self.evaluate(op)
batch_size, num_classes = logits.shape
freqs_mat = []
for i in range(batch_size):
cnts = dict(collections.Counter(d[i, :]))
# Requires drawn class labels be in range.
self.assertLess(max(cnts.keys()), num_classes)
self.assertGreaterEqual(min(cnts.keys()), 0)
freqs = [(cnts[k] * 1. / num_samples if k in cnts else 0)
for k in range(num_classes)]
freqs_mat.append(freqs)
return freqs_mat
def _testRngIsNotConstant(self, rng, dtype, output_dtype):
# Tests that 'rng' does not always return the same value.
with self.session():
with self.test_scope():
x = rng(dtype, output_dtype)
# The random-number generator, if working correctly, should produce the
# same output multiple times with low probability.
y = self.evaluate(x)
z = self.evaluate(x)
w = self.evaluate(x)
# We use exact equality here. If the random-number generator is producing
# deterministic output, all three outputs will be bitwise identical.
self.assertTrue((not np.array_equal(y, z)) or
(not np.array_equal(z, w)) or
(not np.array_equal(y, w)))
def testCategoricalIsNotConstant(self):
def rng(dtype, output_dtype):
return random_ops.multinomial(np.array([[1., 1., 1.]], dtype=dtype), 10,
output_dtype=output_dtype)
dtype = np.float32
for output_dtype in self.output_dtypes():
self._testRngIsNotConstant(rng, dtype, output_dtype)
def METHOD_NAME(self):
for dtype in self.float_types:
for output_dtype in self.output_dtypes():
with self.session():
with self.test_scope():
x = random_ops.multinomial(
array_ops.ones(shape=[1, 20], dtype=dtype), 1000,
output_dtype=output_dtype)
y = self.evaluate(x)
self.assertTrue((y >= 0).sum() == 1000)
self.assertTrue((y < 20).sum() == 1000)
def testSamplingCorrectness(self):
np.random.seed(1618) # Make it reproducible.
num_samples = 40000
rand_probs = np.random.dirichlet([1., 1., 2., 3.])
rand_probs2 = np.random.dirichlet([1., 4., 5.], size=3) # batched
for probs in [[.5, .5], [.85, .05, .1], rand_probs, rand_probs2]:
probs = np.asarray(probs)
if len(probs.shape) == 1:
probs = probs.reshape(1, probs.size) # singleton batch
logits = np.log(probs).astype(np.float32)
freqs = self._do_sampling(logits, num_samples)
# the test here is similar to
# python/kernel_tests/random/multinomial_op_test.py
# Note that df >= 1 in all these cases. Choosing a cutoff of 1e-3
# corresponds to an alpha value of 2.5% for df = 1, and smaller for larger
# df.
chi2 = self._chi2(probs, freqs)
self.assertLess(chi2, 1e-3)
def testStatelessMultinomialIsInRange(self):
for dtype in self.float_types.intersection(
[dtypes.float32, dtypes.bfloat16]):
for output_dtype in self.output_dtypes():
with self.session() as sess:
with self.test_scope():
seed_t = array_ops.placeholder(dtypes.int32, shape=[2])
x = stateless_random_ops.stateless_multinomial(
array_ops.ones(shape=[1, 20], dtype=dtype),
1000,
seed_t,
output_dtype=output_dtype)
y = sess.run(x, {seed_t: [0x12345678, 0xabcdef12]})
self.assertTrue((y >= 0).sum() == 1000)
self.assertTrue((y < 20).sum() == 1000)
def testDeterminismMultinomial(self):
# Stateless values should be equal iff the seeds are equal (roughly)
num_samples = 10
with self.session(), self.test_scope():
seed_t = array_ops.placeholder(dtypes.int32, shape=[2])
seeds = [(x, y) for x in range(5) for y in range(5)] * 3
for logits in ([[0.1, 0.25, 0.5, 0.15]], [[0.5, 0.5], [0.8, 0.2],
[0.25, 0.75]]):
pure = stateless_random_ops.stateless_multinomial(
logits, num_samples, seed=seed_t)
values = [(seed, pure.eval(feed_dict={seed_t: seed})) for seed in seeds]
for s0, v0 in values:
for s1, v1 in values:
self.assertEqual(s0 == s1, np.all(v0 == v1))
def testEmpty(self):
with self.session():
with self.test_scope():
x = random_ops.multinomial(
array_ops.zeros([42, 40]), 0, output_dtype=dtypes.int32)
y = self.evaluate(x)
self.assertEqual(y.shape, (42, 0))
def testEmptyStateless(self):
with self.session() as sess:
with self.test_scope():
seed_t = array_ops.placeholder(dtypes.int32, shape=[2])
x = stateless_random_ops.stateless_multinomial(
array_ops.zeros([42, 40]),
0,
seed=seed_t,
output_dtype=dtypes.int32)
y = sess.run(x, {seed_t: [0x12345678, 0xabcdef1]})
self.assertEqual(y.shape, (42, 0))
if __name__ == '__main__':
googletest.main() |
299,858 | handle | import random
import re
import string
import time
import requests
from django.core.management import BaseCommand
from ajapaik.ajapaik import forms
class Command(BaseCommand):
help = 'Register user and run predefined requests against API'
baseurl = 'http://localhost:8000'
tests = [
{
'url': '^/api/v1/user/me/',
'result': '"error":0',
'timeout': 1000
},
{
'url': '^/api/v1/album/state/?id=10',
'result': 'photos',
'timeout': 1000
},
{
'url': '^/api/v1/album/photos/search/',
'result': 'photos',
'timeout': 1000
},
{
'url': '^/api/v1/albums/',
'result': 'photos',
'timeout': 1000
},
{
'url': '^/api/v1/albums/search/?query=Finland',
'result': '"error":0',
'timeout': 1000
},
{
'url': '^/api/v1/photo/state/?id=8',
'result': 'photos',
'timeout': 1000
},
{
'url': '^/api/v1/album/nearest/?range=20000&longitude=22.306113839149475&latitude=60.41823327541351',
'result': 'photos',
'timeout': 1000
},
{
'url': '^/api/v1/finna/nearest/?range=20000&longitude=22.306285500526428&latitude=60.41835129261017',
'result': 'photos',
'timeout': 1000
},
{
'url': '^/api/v1/source/?query=finna.fi',
'result': 'photos',
'timeout': 1000
},
{
'url': '^/api/v1/photos/search/?query=Turku',
'result': 'photos',
'timeout': 1000
},
{
'url': '^/api/v1/wikidocumentaries/photos/?id=Q19588',
'result': 'photos',
'timeout': 1000
},
{
'url': '^/api/v1/wikidocumentaries/?query=linkkitorni',
'result': '"error":0',
'timeout': 1000
},
{
'url': '^/api/v1/wikidocumentaries/?query=Pasila',
'result': 'photos',
'timeout': 1000
},
{
'url': '^/api/v1/wikidocumentaries/?query=Pasila&lat=60&lon=23',
'result': 'photos',
'timeout': 1000
},
{
'url': '^/api/v1/photo/favorite/set/?id=8',
'result': '"error":0',
'timeout': 1000
},
{
'url': '^/api/v1/photo/fetch-hkm-finna/?id=https://www.finna.fi/Record/hkm.HKMS000005:km0000penx',
'result': '"error":0',
'timeout': 1000
},
{
'url': '^/api/v1/photos/favorite/order-by-distance-to-location/',
'result': 'photos',
'timeout': 1000
},
{
'url': '^/api/v1/photos/filtered/rephotographed-by-user/',
'result': 'photos',
'timeout': 1000
},
{
'url': '^/api/v1/photos/search/',
'result': 'photos',
'timeout': 1000
},
{
'url': '^/api/v1/logout/',
'result': '"error":',
'timeout': 1000
},
]
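    # Each entry pairs a URL pattern (the leading '^' is replaced by the configured
    # base URL) with a regular expression that the response body is expected to
    # match; the 'timeout' value is carried along for reference but is not applied
    # by test_request or run_tests.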
def add_arguments(self, parser):
parser.add_argument('-b', '--baseurl', type=str,
help='Url prefix for the queries. Example: https://staging.ajapaik.ee')
# Optional argument
# parser.add_argument('-p', '--prefix', type=str, help='Define a username prefix', )
def test_request(self, url, method, data, expected_result, session=False):
if not session:
session = requests.Session()
starttime = time.time()
try:
if method == 'post':
contents = session.post(url, data).text
else:
contents = session.get(url).text
if (re.search(expected_result, contents)):
status = 'OK'
else:
status = 'ERROR'
print(url, '\t', status, '\n')
print(contents)
exit(1)
except requests.exceptions.RequestException as e:
status = e
endtime = time.time()
print(status, '\t', url, '\t', round(endtime - starttime, 6), )
return session
def randomString(self, stringLength=10):
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(stringLength))
# create user
def test_register(self, username, password, firstname, lastname, expected_result):
url = f'{self.baseurl}/api/v1/register/'
data = {
'type': forms.APILoginForm.LOGIN_TYPE_AJAPAIK,
'username': username,
'password': password,
'firstname': firstname,
'lastname': lastname
}
session = self.test_request(url, 'post', data, expected_result)
return session
# Test api-login with username and password
def test_login(self, username, password, expected_result):
url = f'{self.baseurl}/api/v1/login/'
data = {
'type': forms.APILoginForm.LOGIN_TYPE_AJAPAIK,
'username': username,
'password': password
}
session = self.test_request(url, 'post', data, expected_result)
return session
# Test api-logout
def test_logout(self, expected_result, session=False):
url = f'{self.baseurl}/api/v1/logout/'
self.test_request(url, 'get', {}, expected_result, session)
# Http basic auth and normal urls
def run_tests(self, username='', password=''):
for t in self.tests:
url = t['url'].replace('^', self.baseurl)
starttime = time.time()
status = ''
session = requests.Session()
if username and password:
session.auth = (username, password)
try:
contents = session.get(url).text
if (re.search(t['result'], contents)):
status = 'OK'
else:
status = 'ERROR'
print(url, '\t', status, '\n')
print(contents)
exit(1)
except requests.exceptions.RequestException as e:
                status = e
endtime = time.time()
print(status, '\t', url, '\t', round(endtime - starttime, 6), )
def METHOD_NAME(self, *args, **options):
if options["baseurl"]:
self.baseurl = options["baseurl"]
randomname = self.randomString(10)
username = f'{randomname}-ajapaik-test@gmail.com'
password = self.randomString(16)
firstname = f'first {randomname}'
lastname = f'last {randomname}'
session = self.test_register(username, password, firstname, lastname, '{"error":0')
print('\ntesting username/password login')
self.test_logout('{"error":2')
session = self.test_login(username, password, '{"error":0')
self.test_logout('{"error":0', session)
self.test_login(f'{username}foo', password, '{"error":10')
self.test_logout('{"error":2')
print('\nlogged out')
self.run_tests()
print('\nlogged in as', username)
self.run_tests(username, password) |
299,859 | is qpwa | """
Copyright 2013 Steven Diamond
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import abc
from typing import Any, List, Tuple
import scipy.sparse as sp
import cvxpy.lin_ops.lin_op as lo
import cvxpy.lin_ops.lin_utils as lu
import cvxpy.utilities as u
from cvxpy.atoms.atom import Atom
from cvxpy.cvxcore.python import canonInterface
from cvxpy.expressions.constants import Constant
from cvxpy.utilities import performance_utils as perf
class AffAtom(Atom):
""" Abstract base class for affine atoms. """
__metaclass__ = abc.ABCMeta
_allow_complex = True
def sign_from_args(self) -> Tuple[bool, bool]:
"""By default, the sign is the most general of all the argument signs.
"""
return u.sign.sum_signs([arg for arg in self.args])
def is_imag(self) -> bool:
"""Is the expression imaginary?
"""
# Default is most generic argument.
return all(arg.is_imag() for arg in self.args)
def is_complex(self) -> bool:
"""Is the expression complex valued?
"""
# Default is most generic argument.
return any(arg.is_complex() for arg in self.args)
def is_atom_convex(self) -> bool:
"""Is the atom convex?
"""
return True
def is_atom_concave(self) -> bool:
"""Is the atom concave?
"""
return True
def is_incr(self, idx) -> bool:
"""Is the composition non-decreasing in argument idx?
"""
# Defaults to increasing.
return True
def is_decr(self, idx) -> bool:
"""Is the composition non-increasing in argument idx?
"""
# Defaults to increasing.
return False
def is_quadratic(self) -> bool:
return all(arg.is_quadratic() for arg in self.args)
def has_quadratic_term(self) -> bool:
"""Does the affine head of the expression contain a quadratic term?
The affine head is all nodes with a path to the root node
that does not pass through any non-affine atom. If the root node
is non-affine, then the affine head is the root alone.
"""
return any(arg.has_quadratic_term() for arg in self.args)
def METHOD_NAME(self) -> bool:
return all(arg.METHOD_NAME() for arg in self.args)
def is_pwl(self) -> bool:
return all(arg.is_pwl() for arg in self.args)
# TODO is this right?
@perf.compute_once
def is_psd(self) -> bool:
"""Is the expression a positive semidefinite matrix?
"""
for idx, arg in enumerate(self.args):
if not ((self.is_incr(idx) and arg.is_psd()) or
(self.is_decr(idx) and arg.is_nsd())):
return False
return True
@perf.compute_once
def is_nsd(self) -> bool:
"""Is the expression a positive semidefinite matrix?
"""
for idx, arg in enumerate(self.args):
if not ((self.is_decr(idx) and arg.is_psd()) or
(self.is_incr(idx) and arg.is_nsd())):
return False
return True
def _grad(self, values) -> List[Any]:
"""Gives the (sub/super)gradient of the atom w.r.t. each argument.
Matrix expressions are vectorized, so the gradient is a matrix.
Args:
values: A list of numeric values for the arguments.
Returns:
A list of SciPy CSC sparse matrices or None.
"""
# TODO should be a simple function in cvxcore for this.
# Make a fake lin op tree for the function.
fake_args = []
var_offsets = {}
offset = 0
for idx, arg in enumerate(self.args):
if arg.is_constant():
fake_args += [Constant(arg.value).canonical_form[0]]
else:
fake_args += [lu.create_var(arg.shape, idx)]
var_offsets[idx] = offset
offset += arg.size
var_length = offset
fake_expr, _ = self.graph_implementation(fake_args, self.shape,
self.get_data())
param_to_size = {lo.CONSTANT_ID: 1}
param_to_col = {lo.CONSTANT_ID: 0}
# Get the matrix representation of the function.
canon_mat = canonInterface.get_problem_matrix(
[fake_expr],
var_length,
var_offsets,
param_to_size,
param_to_col,
self.size,
)
# HACK TODO TODO convert tensors back to vectors.
# COO = (V[lo.CONSTANT_ID][0], (J[lo.CONSTANT_ID][0], I[lo.CONSTANT_ID][0]))
shape = (var_length + 1, self.size)
stacked_grad = canon_mat.reshape(shape).tocsc()[:-1, :]
# Break up into per argument matrices.
grad_list = []
start = 0
for arg in self.args:
if arg.is_constant():
grad_shape = (arg.size, shape[1])
if grad_shape == (1, 1):
grad_list += [0]
else:
grad_list += [sp.coo_matrix(grad_shape, dtype='float64')]
else:
stop = start + arg.size
grad_list += [stacked_grad[start:stop, :]]
start = stop
return grad_list |
299,860 | transform version | # (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from typing import Any, List, Optional, cast # noqa: F401
import requests # noqa: F401
from six import raise_from
from datadog_checks.base import AgentCheck
from datadog_checks.base.utils.db import QueryManager
from .client import Client
from .config import Config
from .types import Instance
class VoltDBCheck(AgentCheck):
__NAMESPACE__ = 'voltdb'
def __init__(self, name, init_config, instances):
# type: (str, dict, list) -> None
super(VoltDBCheck, self).__init__(name, init_config, instances)
self._config = Config(cast(Instance, self.instance), debug=self.log.debug)
self.register_secret(self._config.password)
self._client = Client(
url=self._config.url,
http_get=self.http.get,
username=self._config.username,
password=self._config.password,
password_hashed=self._config.password_hashed,
)
self._query_manager = QueryManager(
self,
self._execute_query_raw,
queries=self._config.queries,
tags=self._config.tags,
)
self.check_initializations.append(self._query_manager.compile_queries)
def _raise_for_status_with_details(self, response):
# type: (requests.Response) -> None
try:
response.raise_for_status()
except Exception as exc:
message = 'Error response from VoltDB: {}'.format(exc)
try:
# Try including detailed error message from response.
details = response.json()['statusstring']
except Exception:
pass
else:
message += ' (details: {})'.format(details)
raise_from(Exception(message), exc)
def _fetch_version(self):
# type: () -> Optional[str]
# See: https://docs.voltdb.com/UsingVoltDB/sysprocsysteminfo.php#sysprocsysinforetvalovervw
response = self._client.request('@SystemInformation', parameters=['OVERVIEW'])
self._raise_for_status_with_details(response)
data = response.json()
rows = data['results'][0]['data'] # type: List[tuple]
# NOTE: there will be one VERSION row per server in the cluster.
# Arbitrarily use the first one we see.
for _, column, value in rows:
if column == 'VERSION':
return self.METHOD_NAME(value)
self.log.debug('VERSION column not found: %s', [column for _, column, _ in rows])
return None
def METHOD_NAME(self, raw):
# type: (str) -> Optional[str]
# VoltDB does not include .0 patch numbers (eg 10.0, not 10.0.0).
# Need to ensure they're present so the version is always in 3 parts: major.minor.patch.
try:
major, rest = raw.split('.', 1)
except ValueError:
self.log.debug('Malformed version string: %s', raw)
return None
minor, found, patch = rest.partition('.')
if not found:
patch = '0'
return '{}.{}.{}'.format(major, minor, patch)
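    # For illustration: '10.0' becomes '10.0.0', '10.1.3' already has three parts
    # and passes through unchanged, and a malformed string without a '.' such as
    # '10' yields None.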
@AgentCheck.metadata_entrypoint
def _submit_version(self, version):
# type: (str) -> None
self.set_metadata('version', version)
def _check_can_connect_and_submit_version(self):
        # type: () -> None
host, port = self._config.netloc
tags = ['host:{}'.format(host), 'port:{}'.format(port)] + self._config.tags
try:
version = self._fetch_version()
except Exception as exc:
message = 'Unable to connect to VoltDB: {}'.format(exc)
self.service_check('can_connect', self.CRITICAL, message=message, tags=tags)
raise
self.service_check('can_connect', self.OK, tags=tags)
if version is not None:
self._submit_version(version)
def _execute_query_raw(self, query):
# type: (str) -> List[tuple]
# Ad-hoc format, close to the HTTP API format.
# Eg 'A:[B, C]' -> '?Procedure=A&Parameters=[B, C]'
procedure, _, parameters = query.partition(":")
response = self._client.request(procedure, parameters=parameters)
self._raise_for_status_with_details(response)
data = response.json()
return data['results'][0]['data']
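    # Sketch of the ad-hoc query format (hypothetical procedure and parameters):
    # the query string '@Statistics:["MEMORY", 0]' calls the @Statistics system
    # procedure with parameters ["MEMORY", 0] and returns the rows of the first
    # result table.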
def check(self, _):
# type: (Any) -> None
self._check_can_connect_and_submit_version()
self._query_manager.execute() |
299,861 | tail | # This file is part of rinohtype, the Python document preparation system.
#
# Copyright (c) Brecht Machiels.
#
# Use of this source code is subject to the terms of the GNU Affero General
# Public License v3. See the LICENSE file or http://www.gnu.org/licenses/.
import os
import re
from urllib.parse import urljoin
from urllib.request import pathname2url
from ...styleds import Paragraph
from ...text import MixedStyledText
from ...util import NotImplementedAttribute
from ... import DATA_PATH
from .. import (TreeNode, InlineNode, BodyNode, BodySubNode, GroupingNode,
DummyNode, TreeNodeMeta)
__all__ = ['filter', 'strip_and_filter',
'ElementTreeNode', 'ElementTreeInlineNode', 'ElementTreeBodyNode',
'ElementTreeBodySubNode', 'ElementTreeGroupingNode',
'ElementTreeMixedContentNode', 'ElementTreeDummyNode',
'ElementTreeNodeMeta']
CATALOG_PATH = os.path.join(DATA_PATH, 'xml', 'catalog')
CATALOG_URL = urljoin('file:', pathname2url(CATALOG_PATH))
CATALOG_NS = "urn:oasis:names:tc:entity:xmlns:xml:catalog"
RE_WHITESPACE = re.compile('[\t\r\n ]+')
def ends_with_space(node):
while node.getchildren():
node = node.getchildren()[-1]
if node.METHOD_NAME:
text = node.METHOD_NAME
break
else:
text = node.text or ''
return text.endswith(' ')
def filter_styled_text_node(node, strip_leading_ws):
styled_text = node.styled_text(strip_leading_ws)
if styled_text:
yield styled_text, ends_with_space(node)
def strip_and_filter(text, strip_leading_whitespace):
if not text:
return
if strip_leading_whitespace:
text = text.lstrip()
if text:
yield text, text.endswith(' ')
def filter_whitespace(text, children, strip_leading_ws):
for item, strip_leading_ws in strip_and_filter(text, strip_leading_ws):
yield item
for child in children:
for result in filter_styled_text_node(child, strip_leading_ws):
styled_text, strip_leading_ws = result
yield styled_text
for item, strip_leading_ws in strip_and_filter(child.METHOD_NAME,
strip_leading_ws):
yield item
def process_content(text, children, strip_leading_whitespace=True, style=None):
text_items = filter_whitespace(text, children, strip_leading_whitespace)
return MixedStyledText([item for item in text_items], style=style)
class ElementTreeNode(TreeNode):
NAMESPACE = NotImplementedAttribute()
@classmethod
def strip_namespace(cls, tag):
if '{' in tag:
assert tag.startswith('{{{}}}'.format(cls.NAMESPACE))
return tag[tag.find('}') + 1:]
else:
return tag
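    # e.g. with a (hypothetical) NAMESPACE of 'http://docbook.org/ns/docbook', the
    # qualified tag '{http://docbook.org/ns/docbook}section' is reduced to the
    # plain tag name 'section'.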
@classmethod
def node_tag_name(cls, node):
return cls.strip_namespace(node.tag)
@staticmethod
def node_parent(node):
return node._parent
@staticmethod
def node_children(node):
return node.getchildren()
@property
def location(self):
return self.node._root._filename, self.node.sourceline, self.tag_name
@property
def _id(self):
return self.get('id')
@property
def _location(self):
return self.node_location(self.node)
@property
def filename(self):
return self.node._root._filename
@property
def text(self):
if self.node.text:
if self.get('xml:space') == 'preserve':
return self.node.text
else:
return RE_WHITESPACE.sub(' ', self.node.text)
else:
return ''
@property
def METHOD_NAME(self):
if self.node.METHOD_NAME:
return RE_WHITESPACE.sub(' ', self.node.METHOD_NAME)
else:
return None
@property
def attributes(self):
return self.node.attrib
def get(self, key, default=None):
return self.node.get(key, default)
def __getitem__(self, name):
return self.node[name]
def process_content(self, strip_leading_whitespace=True, style=None):
return process_content(self.text, self.getchildren(),
strip_leading_whitespace, style=style)
class ElementTreeInlineNode(ElementTreeNode, InlineNode):
def styled_text(self, strip_leading_whitespace=False):
return self.build_styled_text(strip_leading_whitespace)
def build_styled_text(self, strip_leading_whitespace=False):
return self.process_content(strip_leading_whitespace, style=self.style)
class ElementTreeBodyNode(ElementTreeNode, BodyNode):
def flowables(self):
classes = self.get('classes')
for flowable in super().flowables():
flowable.classes = classes
yield flowable
class ElementTreeBodySubNode(ElementTreeNode, BodySubNode):
pass
class ElementTreeGroupingNode(ElementTreeBodyNode, GroupingNode):
pass
class ElementTreeMixedContentNode(ElementTreeGroupingNode):
def children_flowables(self):
strip_leading_ws = True
paragraph = []
for item, strip_leading_ws in strip_and_filter(self.text,
strip_leading_ws):
paragraph.append(item)
for child in self.getchildren():
try:
for result in filter_styled_text_node(child, strip_leading_ws):
styled_text, strip_leading_ws = result
paragraph.append(styled_text)
except AttributeError:
if paragraph and paragraph[0]:
yield Paragraph(paragraph)
paragraph = []
for flowable in child.flowables():
yield flowable
for item, strip_leading_ws \
in strip_and_filter(child.METHOD_NAME, strip_leading_ws):
paragraph.append(item)
if paragraph and paragraph[0]:
yield Paragraph(paragraph)
class ElementTreeDummyNode(ElementTreeNode, DummyNode):
pass
class ElementTreeNodeMeta(TreeNodeMeta):
root = ElementTreeNode
bases = (ElementTreeInlineNode, ElementTreeBodyNode, ElementTreeBodySubNode,
ElementTreeGroupingNode, ElementTreeDummyNode) |
299,862 | set box color | """
NCL_box_3.py
===============
This script illustrates the following concepts:
- Drawing box plots
- Adding markers to a box plot
- Setting the color of individual boxes in a box plot
- Setting the width of individual boxes in a box plot
See following URLs to see the reproduced NCL plot & script:
- Original NCL script: https://www.ncl.ucar.edu/Applications/Scripts/box_3.ncl
- Original NCL plot: https://www.ncl.ucar.edu/Applications/Images/box_3_lg.png
"""
###############################################################################
# Import packages:
import numpy as np
import matplotlib.pyplot as plt
import geocat.viz as gv
###############################################################################
# Generate fake data
np.random.seed(200)
data = np.random.lognormal(size=(40, 3), mean=1, sigma=.7)
for a in range(len(data)):
data[a] = [x - 4 for x in data[a]]
###############################################################################
# Helper function to set edge color of boxes
def METHOD_NAME(boxplot, colors):
# Set edge color of the outside and median lines of the boxes
for element in ['boxes', 'medians']:
for box, color in zip(boxplot[element], colors):
plt.setp(box, color=color)
# Set the color of the whiskers and caps of the boxes
for element in ['whiskers', 'caps']:
for box, color in zip(
zip(boxplot[element][::2], boxplot[element][1::2]), colors):
plt.setp(box, color=color)
###############################################################################
# Helper function to remove axis "spines" on the top and right sides
def removeSpines(ax):
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
###############################################################################
# Plot:
# Create figure and axis
fig, ax = plt.subplots(figsize=(6, 6))
# Plot each boxplot, set tick labels, and determine box widths
boxplots = ax.boxplot(data,
labels=['Control', '-2Xna', '2Xna'],
widths=[0.4, 0.4, 0.4],
showfliers=False)
# Set whisker style to dashed
plt.setp(boxplots['whiskers'], linestyle='--')
# Set boxplot edge colors
METHOD_NAME(boxplots, ['blue', 'red', 'green'])
# Use geocat.viz.util convenience function to set axes tick values
gv.set_axes_limits_and_ticks(ax, ylim=(-6.0, 9.0), yticks=[-3.0, 0.0, 3.0, 6.0])
# Use geocat.viz.util convenience function to add minor and major tick lines
gv.add_major_minor_ticks(ax,
y_minor_per_major=3,
x_minor_per_major=1,
labelsize=14)
# Use geocat.viz.util convenience function to add title to the plot axis.
gv.set_titles_and_labels(ax, maintitle='Box Plot with Polymarkers')
# Make both major and minor ticks point inwards towards the plot
ax.tick_params(direction="in", which='both')
# Get rid of right and top axis spines
removeSpines(ax)
# Set ticks only at left and bottom sides of plot
ax.yaxis.set_ticks_position('left')
ax.xaxis.set_ticks_position('bottom')
# Add another partially transparent axis on top of the first one
ax2 = ax.inset_axes([0, 0, 1, 1])
ax2.patch.set_alpha(0.2)
# Set limits of second axis so markers will line up with boxes on boxplot
ax2.set_xlim(0, 6)
ax2.set_ylim(-6, 9)
# Turn both major and minor ticks in overlayed axis off
ax2.tick_params(which='both',
top=False,
bottom=False,
left=False,
right=False,
labelleft=False,
labelbottom=False)
# Get rid of right and top axis spines
removeSpines(ax2)
# Plot red x markers
ax2.scatter(1, 7.7, marker='x', color='red', linewidth=.5, s=100)
ax2.scatter(3, 2.5, marker='x', color='red', linewidth=.5, s=100)
ax2.scatter(5, 2, marker='x', color='red', linewidth=.5, s=100)
# Plot blue o markers
ax2.scatter(1, 2, marker='o', color='darkblue')
ax2.scatter(3, -0.5, marker='o', color='darkblue')
ax2.scatter(5, 1, marker='o', color='darkblue')
plt.show() |
299,863 | foo | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
import os
import sys
import types
from twisted.python import rebuild
from twisted.trial.unittest import TestCase
from . import crash_test_dummy
f = crash_test_dummy.METHOD_NAME
class Foo:
pass
class Bar(Foo):
pass
class Baz:
pass
class Buz(Bar, Baz):
pass
class HashRaisesRuntimeError:
"""
Things that don't hash (raise an Exception) should be ignored by the
rebuilder.
@ivar hashCalled: C{bool} set to True when __hash__ is called.
"""
def __init__(self) -> None:
self.hashCalled = False
def __hash__(self):
self.hashCalled = True
raise RuntimeError("not a TypeError!")
# Set in test_hashException
unhashableObject = None
class RebuildTests(TestCase):
"""
Simple testcase for rebuilding, to at least exercise the code.
"""
def setUp(self) -> None:
self.libPath = self.mktemp()
os.mkdir(self.libPath)
self.fakelibPath = os.path.join(self.libPath, "twisted_rebuild_fakelib")
os.mkdir(self.fakelibPath)
open(os.path.join(self.fakelibPath, "__init__.py"), "w").close()
sys.path.insert(0, self.libPath)
def tearDown(self) -> None:
sys.path.remove(self.libPath)
def test_FileRebuild(self) -> None:
import shutil
import time
from twisted.python.util import sibpath
shutil.copyfile(
sibpath(__file__, "myrebuilder1.py"),
os.path.join(self.fakelibPath, "myrebuilder.py"),
)
from twisted_rebuild_fakelib import myrebuilder # type: ignore[import]
a = myrebuilder.A()
b = myrebuilder.B()
i = myrebuilder.Inherit()
self.assertEqual(a.a(), "a")
# Necessary because the file has not "changed" if a second has not gone
# by in unix. This sucks, but it's not often that you'll be doing more
# than one reload per second.
time.sleep(1.1)
shutil.copyfile(
sibpath(__file__, "myrebuilder2.py"),
os.path.join(self.fakelibPath, "myrebuilder.py"),
)
rebuild.rebuild(myrebuilder)
b2 = myrebuilder.B()
self.assertEqual(b2.b(), "c")
self.assertEqual(b.b(), "c")
self.assertEqual(i.a(), "d")
self.assertEqual(a.a(), "b")
def test_Rebuild(self) -> None:
"""
Rebuilding an unchanged module.
"""
# This test would actually pass if rebuild was a no-op, but it
# ensures rebuild doesn't break stuff while being a less
# complex test than testFileRebuild.
x = crash_test_dummy.X("a")
rebuild.rebuild(crash_test_dummy, doLog=False)
# Instance rebuilding is triggered by attribute access.
x.do()
self.assertEqual(x.__class__, crash_test_dummy.X)
self.assertEqual(f, crash_test_dummy.METHOD_NAME)
def test_ComponentInteraction(self) -> None:
x = crash_test_dummy.XComponent()
x.setAdapter(crash_test_dummy.IX, crash_test_dummy.XA)
x.getComponent(crash_test_dummy.IX)
rebuild.rebuild(crash_test_dummy, 0)
newComponent = x.getComponent(crash_test_dummy.IX)
newComponent.method()
self.assertEqual(newComponent.__class__, crash_test_dummy.XA)
# Test that a duplicate registerAdapter is not allowed
from twisted.python import components
self.assertRaises(
ValueError,
components.registerAdapter,
crash_test_dummy.XA,
crash_test_dummy.X,
crash_test_dummy.IX,
)
def test_UpdateInstance(self) -> None:
global Foo, Buz
b = Buz()
class Foo:
def METHOD_NAME(self) -> None:
"""
Dummy method
"""
class Buz(Bar, Baz):
x = 10
rebuild.updateInstance(b)
assert hasattr(b, "foo"), "Missing method on rebuilt instance"
assert hasattr(b, "x"), "Missing class attribute on rebuilt instance"
def test_BananaInteraction(self) -> None:
from twisted.python import rebuild
from twisted.spread import banana
rebuild.latestClass(banana.Banana)
def test_hashException(self) -> None:
"""
Rebuilding something that has a __hash__ that raises a non-TypeError
shouldn't cause rebuild to die.
"""
global unhashableObject
unhashableObject = HashRaisesRuntimeError()
def _cleanup() -> None:
global unhashableObject
unhashableObject = None
self.addCleanup(_cleanup)
rebuild.rebuild(rebuild)
self.assertTrue(unhashableObject.hashCalled)
def test_Sensitive(self) -> None:
"""
L{twisted.python.rebuild.Sensitive}
"""
from twisted.python import rebuild
from twisted.python.rebuild import Sensitive
class TestSensitive(Sensitive):
def test_method(self) -> None:
"""
Dummy method
"""
testSensitive = TestSensitive()
testSensitive.rebuildUpToDate()
self.assertFalse(testSensitive.needRebuildUpdate())
# Test rebuilding a builtin class
newException = rebuild.latestClass(Exception)
self.assertEqual(repr(Exception), repr(newException))
self.assertEqual(newException, testSensitive.latestVersionOf(newException))
# Test types.MethodType on method in class
self.assertEqual(
TestSensitive.test_method,
testSensitive.latestVersionOf(TestSensitive.test_method),
)
# Test types.MethodType on method in instance of class
self.assertEqual(
testSensitive.test_method,
testSensitive.latestVersionOf(testSensitive.test_method),
)
# Test a class
self.assertEqual(TestSensitive, testSensitive.latestVersionOf(TestSensitive))
def myFunction() -> None:
"""
Dummy method
"""
# Test types.FunctionType
self.assertEqual(myFunction, testSensitive.latestVersionOf(myFunction))
class NewStyleTests(TestCase):
"""
Tests for rebuilding new-style classes of various sorts.
"""
def setUp(self) -> None:
self.m = types.ModuleType("whipping")
sys.modules["whipping"] = self.m
def tearDown(self) -> None:
del sys.modules["whipping"]
del self.m
def test_slots(self) -> None:
"""
Try to rebuild a new style class with slots defined.
"""
classDefinition = "class SlottedClass:\n" " __slots__ = ['a']\n"
exec(classDefinition, self.m.__dict__)
inst = self.m.SlottedClass()
inst.a = 7
exec(classDefinition, self.m.__dict__)
rebuild.updateInstance(inst)
self.assertEqual(inst.a, 7)
self.assertIs(type(inst), self.m.SlottedClass)
def test_typeSubclass(self) -> None:
"""
Try to rebuild a base type subclass.
"""
classDefinition = "class ListSubclass(list):\n" " pass\n"
exec(classDefinition, self.m.__dict__)
inst = self.m.ListSubclass()
inst.append(2)
exec(classDefinition, self.m.__dict__)
rebuild.updateInstance(inst)
self.assertEqual(inst[0], 2)
self.assertIs(type(inst), self.m.ListSubclass) |
299,864 | comb | import sys
from collections.abc import Iterable
from typing import Protocol, SupportsFloat, TypeVar, overload
from typing_extensions import SupportsIndex, TypeAlias
_T = TypeVar("_T")
_T_co = TypeVar("_T_co", covariant=True)
if sys.version_info >= (3, 8):
_SupportsFloatOrIndex: TypeAlias = SupportsFloat | SupportsIndex
else:
_SupportsFloatOrIndex: TypeAlias = SupportsFloat
e: float
pi: float
inf: float
nan: float
tau: float
def acos(__x: _SupportsFloatOrIndex) -> float: ...
def acosh(__x: _SupportsFloatOrIndex) -> float: ...
def asin(__x: _SupportsFloatOrIndex) -> float: ...
def asinh(__x: _SupportsFloatOrIndex) -> float: ...
def atan(__x: _SupportsFloatOrIndex) -> float: ...
def atan2(__y: _SupportsFloatOrIndex, __x: _SupportsFloatOrIndex) -> float: ...
def atanh(__x: _SupportsFloatOrIndex) -> float: ...
if sys.version_info >= (3, 11):
def cbrt(__x: _SupportsFloatOrIndex) -> float: ...
class _SupportsCeil(Protocol[_T_co]):
def __ceil__(self) -> _T_co: ...
@overload
def ceil(__x: _SupportsCeil[_T]) -> _T: ...
@overload
def ceil(__x: _SupportsFloatOrIndex) -> int: ...
if sys.version_info >= (3, 8):
def METHOD_NAME(__n: SupportsIndex, __k: SupportsIndex) -> int: ...
def copysign(__x: _SupportsFloatOrIndex, __y: _SupportsFloatOrIndex) -> float: ...
def cos(__x: _SupportsFloatOrIndex) -> float: ...
def cosh(__x: _SupportsFloatOrIndex) -> float: ...
def degrees(__x: _SupportsFloatOrIndex) -> float: ...
if sys.version_info >= (3, 8):
def dist(__p: Iterable[_SupportsFloatOrIndex], __q: Iterable[_SupportsFloatOrIndex]) -> float: ...
def erf(__x: _SupportsFloatOrIndex) -> float: ...
def erfc(__x: _SupportsFloatOrIndex) -> float: ...
def exp(__x: _SupportsFloatOrIndex) -> float: ...
if sys.version_info >= (3, 11):
def exp2(__x: _SupportsFloatOrIndex) -> float: ...
def expm1(__x: _SupportsFloatOrIndex) -> float: ...
def fabs(__x: _SupportsFloatOrIndex) -> float: ...
if sys.version_info >= (3, 8):
def factorial(__x: SupportsIndex) -> int: ...
else:
def factorial(__x: int) -> int: ...
class _SupportsFloor(Protocol[_T_co]):
def __floor__(self) -> _T_co: ...
@overload
def floor(__x: _SupportsFloor[_T]) -> _T: ...
@overload
def floor(__x: _SupportsFloatOrIndex) -> int: ...
def fmod(__x: _SupportsFloatOrIndex, __y: _SupportsFloatOrIndex) -> float: ...
def frexp(__x: _SupportsFloatOrIndex) -> tuple[float, int]: ...
def fsum(__seq: Iterable[_SupportsFloatOrIndex]) -> float: ...
def gamma(__x: _SupportsFloatOrIndex) -> float: ...
if sys.version_info >= (3, 9):
def gcd(*integers: SupportsIndex) -> int: ...
else:
def gcd(__x: SupportsIndex, __y: SupportsIndex) -> int: ...
if sys.version_info >= (3, 8):
def hypot(*coordinates: _SupportsFloatOrIndex) -> float: ...
else:
def hypot(__x: _SupportsFloatOrIndex, __y: _SupportsFloatOrIndex) -> float: ...
def isclose(
a: _SupportsFloatOrIndex,
b: _SupportsFloatOrIndex,
*,
rel_tol: _SupportsFloatOrIndex = 1e-09,
abs_tol: _SupportsFloatOrIndex = 0.0,
) -> bool: ...
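# isclose(a, b) is True when abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol);
# e.g. isclose(1.0, 1.0 + 1e-10) is True under the default rel_tol of 1e-09.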
def isinf(__x: _SupportsFloatOrIndex) -> bool: ...
def isfinite(__x: _SupportsFloatOrIndex) -> bool: ...
def isnan(__x: _SupportsFloatOrIndex) -> bool: ...
if sys.version_info >= (3, 8):
def isqrt(__n: SupportsIndex) -> int: ...
if sys.version_info >= (3, 9):
def lcm(*integers: SupportsIndex) -> int: ...
def ldexp(__x: _SupportsFloatOrIndex, __i: int) -> float: ...
def lgamma(__x: _SupportsFloatOrIndex) -> float: ...
def log(x: _SupportsFloatOrIndex, base: _SupportsFloatOrIndex = ...) -> float: ...
def log10(__x: _SupportsFloatOrIndex) -> float: ...
def log1p(__x: _SupportsFloatOrIndex) -> float: ...
def log2(__x: _SupportsFloatOrIndex) -> float: ...
def modf(__x: _SupportsFloatOrIndex) -> tuple[float, float]: ...
if sys.version_info >= (3, 12):
def nextafter(__x: _SupportsFloatOrIndex, __y: _SupportsFloatOrIndex, *, steps: SupportsIndex | None = None) -> float: ...
elif sys.version_info >= (3, 9):
def nextafter(__x: _SupportsFloatOrIndex, __y: _SupportsFloatOrIndex) -> float: ...
if sys.version_info >= (3, 8):
def perm(__n: SupportsIndex, __k: SupportsIndex | None = None) -> int: ...
def pow(__x: _SupportsFloatOrIndex, __y: _SupportsFloatOrIndex) -> float: ...
if sys.version_info >= (3, 8):
@overload
def prod(__iterable: Iterable[SupportsIndex], *, start: SupportsIndex = 1) -> int: ... # type: ignore[misc]
@overload
def prod(__iterable: Iterable[_SupportsFloatOrIndex], *, start: _SupportsFloatOrIndex = 1) -> float: ...
def radians(__x: _SupportsFloatOrIndex) -> float: ...
def remainder(__x: _SupportsFloatOrIndex, __y: _SupportsFloatOrIndex) -> float: ...
def sin(__x: _SupportsFloatOrIndex) -> float: ...
def sinh(__x: _SupportsFloatOrIndex) -> float: ...
if sys.version_info >= (3, 12):
def sumprod(__p: Iterable[float], __q: Iterable[float]) -> float: ...
def sqrt(__x: _SupportsFloatOrIndex) -> float: ...
def tan(__x: _SupportsFloatOrIndex) -> float: ...
def tanh(__x: _SupportsFloatOrIndex) -> float: ...
# Is different from `_typeshed.SupportsTrunc`, which is not generic
class _SupportsTrunc(Protocol[_T_co]):
def __trunc__(self) -> _T_co: ...
def trunc(__x: _SupportsTrunc[_T]) -> _T: ...
if sys.version_info >= (3, 9):
def ulp(__x: _SupportsFloatOrIndex) -> float: ... |
299,865 | test get version from file | """Test runway.env_mgr.kbenv."""
# pyright: basic, reportFunctionMemberAccess=none
from __future__ import annotations
import re
from typing import TYPE_CHECKING, Optional
import pytest
from runway.env_mgr.kbenv import KB_VERSION_FILENAME, KBEnvManager
from runway.utils import Version
if TYPE_CHECKING:
from pathlib import Path
from pytest_mock import MockerFixture
MODULE = "runway.env_mgr.kbenv"
class TestKBEnvManager:
"""Test KBEnvManager."""
def METHOD_NAME(self, tmp_path: Path) -> None:
"""Test get_version_from_file."""
obj = KBEnvManager(tmp_path)
# no version file or path
assert not obj.get_version_from_file()
# path not provided; use version file
version_file = tmp_path / KB_VERSION_FILENAME
version_file.write_text("v1.22.0")
assert obj.get_version_from_file(version_file) == "v1.22.0"
@pytest.mark.parametrize("version_requested", ["v1.21.0", "1.12.0"])
def test_install_version_requested(
self, mocker: MockerFixture, tmp_path: Path, version_requested: str
) -> None:
"""Test install version_requested."""
mock_download_kb_release = mocker.patch(f"{MODULE}.download_kb_release")
mocker.patch.object(KBEnvManager, "versions_dir", tmp_path / "kbenv")
obj = KBEnvManager(tmp_path)
assert obj.install(version_requested) == str(obj.bin)
mock_download_kb_release.assert_called_once_with(
version_requested
if version_requested.startswith("v")
else f"v{version_requested}",
obj.versions_dir,
)
def test_list_installed(self, mocker: MockerFixture, tmp_path: Path) -> None:
"""Test list_installed."""
mocker.patch.object(KBEnvManager, "versions_dir", tmp_path)
version_dirs = [tmp_path / "v1.14.0", tmp_path / "v1.21.0"]
for v_dir in version_dirs:
v_dir.mkdir()
(tmp_path / "something.txt").touch()
result = list(KBEnvManager().list_installed()) # convert generator to list
result.sort() # sort list for comparison
assert result == version_dirs
def test_list_installed_none(self, mocker: MockerFixture, tmp_path: Path) -> None:
"""Test list_installed."""
mocker.patch.object(KBEnvManager, "versions_dir", tmp_path)
assert not list(KBEnvManager().list_installed())
@pytest.mark.parametrize(
"provided, expected",
[
("0.15.2", Version("v0.15.2")),
("v0.15.2", Version("v0.15.2")),
("0.15.0-alpha.13", Version("v0.15.0-alpha.13")),
("v0.15.0-alpha.13", Version("v0.15.0-alpha.13")),
],
)
def test_parse_version_string(
self, provided: str, expected: Optional[Version]
) -> None:
"""Test parse_version_string."""
assert KBEnvManager.parse_version_string(provided) == expected
def test_parse_version_string_raise_value_error(self) -> None:
"""Test parse_version_string."""
with pytest.raises(
ValueError,
match=re.escape(
f"provided version doesn't conform to regex: {KBEnvManager.VERSION_REGEX}"
),
):
KBEnvManager.parse_version_string("invalid")
def test_set_version(self, mocker: MockerFixture, tmp_path: Path) -> None:
"""Test set_version."""
version = Version("1.22.0")
mocker.patch.object(KBEnvManager, "get_version_from_file", return_value=None)
obj = KBEnvManager(tmp_path)
assert not obj.current_version
assert not obj.set_version(str(version))
assert obj.version == version
assert obj.current_version == str(version)
def test_set_version_same(self, mocker: MockerFixture, tmp_path: Path) -> None:
"""Test set_version same."""
version = mocker.patch.object(KBEnvManager, "version")
obj = KBEnvManager(tmp_path)
obj.current_version = "v1.22.0"
assert not obj.set_version("v1.22.0")
assert obj.current_version == "v1.22.0"
assert obj.version == version
def test_version(self, mocker: MockerFixture, tmp_path: Path) -> None:
"""Test version."""
get_version_from_file = mocker.patch.object(
KBEnvManager, "get_version_from_file"
)
parse_version_string = mocker.patch.object(
KBEnvManager, "parse_version_string", return_value="success"
)
obj = KBEnvManager(tmp_path)
obj.current_version = "version"
assert obj.version == "success"
get_version_from_file.assert_not_called()
parse_version_string.assert_called_once_with("version")
def test_version_get_version_from_file(
self, mocker: MockerFixture, tmp_path: Path
) -> None:
"""Test version."""
get_version_from_file = mocker.patch.object(
KBEnvManager, "get_version_from_file", return_value="version"
)
parse_version_string = mocker.patch.object(
KBEnvManager, "parse_version_string", return_value="success"
)
obj = KBEnvManager(tmp_path)
assert obj.version == "success"
get_version_from_file.assert_called_once_with()
parse_version_string.assert_called_once_with("version")
def test_version_none(self, mocker: MockerFixture, tmp_path: Path) -> None:
"""Test version."""
get_version_from_file = mocker.patch.object(
KBEnvManager, "get_version_from_file", return_value=None
)
parse_version_string = mocker.patch.object(KBEnvManager, "parse_version_string")
obj = KBEnvManager(tmp_path)
assert not obj.version
get_version_from_file.assert_called_once_with()
parse_version_string.assert_not_called()
def test_version_file(self, tmp_path: Path) -> None:
"""Test version_file."""
mod_path = tmp_path / "mod"
overlay_path = mod_path / "overlay"
overlay_path.mkdir(parents=True)
obj = KBEnvManager(mod_path)
# no version file
assert not obj.version_file
del obj.version_file
# version file in parent dir
expected = tmp_path / KB_VERSION_FILENAME
expected.touch()
assert obj.version_file == expected
del obj.version_file
# version file in module dir
expected = mod_path / KB_VERSION_FILENAME
expected.touch()
assert obj.version_file == expected
del obj.version_file
# version file in overlay dir
expected = overlay_path / KB_VERSION_FILENAME
expected.touch()
assert obj.version_file == mod_path / KB_VERSION_FILENAME
assert (
KBEnvManager(mod_path, overlay_path=overlay_path).version_file == expected
) |
299,866 | url | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"servicebus georecovery-alias authorization-rule keys list",
)
class List(AAZCommand):
"""Gets the primary and secondary connection strings for the namespace.
"""
_aaz_info = {
"version": "2022-10-01-preview",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.servicebus/namespaces/{}/disasterrecoveryconfigs/{}/authorizationrules/{}/listkeys", "2022-10-01-preview"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return self._output()
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.alias = AAZStrArg(
options=["-a", "--alias"],
help="The Disaster Recovery configuration name",
required=True,
fmt=AAZStrArgFormat(
max_length=50,
min_length=1,
),
)
_args_schema.authorization_rule_name = AAZStrArg(
options=["-n", "--name", "--authorization-rule-name"],
help="The authorization rule name.",
required=True,
fmt=AAZStrArgFormat(
max_length=50,
min_length=1,
),
)
_args_schema.namespace_name = AAZStrArg(
options=["--namespace-name"],
help="The namespace name",
required=True,
fmt=AAZStrArgFormat(
max_length=50,
min_length=6,
),
)
_args_schema.resource_group = AAZResourceGroupNameArg(
required=True,
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.DisasterRecoveryConfigsListKeys(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
def _output(self, *args, **kwargs):
result = self.deserialize_output(self.ctx.vars.instance, client_flatten=True)
return result
class DisasterRecoveryConfigsListKeys(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def METHOD_NAME(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/disasterRecoveryConfigs/{alias}/authorizationRules/{authorizationRuleName}/listKeys",
**self.url_parameters
)
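        # For illustration (hypothetical values): resource group 'rg1', namespace
        # 'ns1', alias 'dr-alias' and rule 'RootManageSharedAccessKey' expand to
        # .../resourceGroups/rg1/providers/Microsoft.ServiceBus/namespaces/ns1/
        # disasterRecoveryConfigs/dr-alias/authorizationRules/
        # RootManageSharedAccessKey/listKeys under the subscription scope.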
@property
def method(self):
return "POST"
@property
def error_format(self):
return "MgmtErrorFormat"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"alias", self.ctx.args.alias,
required=True,
),
**self.serialize_url_param(
"authorizationRuleName", self.ctx.args.authorization_rule_name,
required=True,
),
**self.serialize_url_param(
"namespaceName", self.ctx.args.namespace_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2022-10-01-preview",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Accept", "application/json",
),
}
return parameters
def on_200(self, session):
data = self.deserialize_http_content(session)
self.ctx.set_var(
"instance",
data,
schema_builder=self._build_schema_on_200
)
_schema_on_200 = None
@classmethod
def _build_schema_on_200(cls):
if cls._schema_on_200 is not None:
return cls._schema_on_200
cls._schema_on_200 = AAZObjectType()
_schema_on_200 = cls._schema_on_200
_schema_on_200.alias_primary_connection_string = AAZStrType(
serialized_name="aliasPrimaryConnectionString",
flags={"read_only": True},
)
_schema_on_200.alias_secondary_connection_string = AAZStrType(
serialized_name="aliasSecondaryConnectionString",
flags={"read_only": True},
)
_schema_on_200.key_name = AAZStrType(
serialized_name="keyName",
flags={"read_only": True},
)
_schema_on_200.primary_connection_string = AAZStrType(
serialized_name="primaryConnectionString",
flags={"read_only": True},
)
_schema_on_200.primary_key = AAZStrType(
serialized_name="primaryKey",
flags={"read_only": True},
)
_schema_on_200.secondary_connection_string = AAZStrType(
serialized_name="secondaryConnectionString",
flags={"read_only": True},
)
_schema_on_200.secondary_key = AAZStrType(
serialized_name="secondaryKey",
flags={"read_only": True},
)
return cls._schema_on_200
class _ListHelper:
"""Helper class for List"""
__all__ = ["List"] |
299,867 | detail | # -*- coding: utf-8 -*-
# This file is part of wger Workout Manager.
#
# wger Workout Manager is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# wger Workout Manager is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# Standard Library
import datetime
import logging
# Django
from django.http import (
HttpResponseForbidden,
HttpResponseRedirect,
)
from django.shortcuts import (
get_object_or_404,
render,
)
from django.urls import reverse
from django.utils.translation import gettext_lazy
from django.views.generic import (
CreateView,
DeleteView,
)
# wger
from wger.nutrition.forms import MealLogItemForm
from wger.nutrition.models import (
LogItem,
Meal,
NutritionPlan,
)
from wger.utils.generic_views import (
WgerDeleteMixin,
WgerFormMixin,
WgerPermissionMixin,
)
logger = logging.getLogger(__name__)
def overview(request, pk):
"""
Shows an overview of diary entries for the given plan
"""
# Check read permission
plan = get_object_or_404(NutritionPlan, pk=pk)
user = plan.user
is_owner = request.user == user
if not is_owner and not user.userprofile.ro_access:
return HttpResponseForbidden()
log_data = []
planned_calories = plan.get_nutritional_values()['total']['energy']
for item in plan.get_log_overview():
log_data.append(
{
'date': item['date'],
'planned_calories': planned_calories,
'logged_calories': item['energy'],
'difference': item['energy'] - planned_calories
}
)
context = {'plan': plan, 'show_shariff': is_owner, 'is_owner': is_owner, 'log_data': log_data}
return render(request, 'log/overview.html', context)
def METHOD_NAME(request, pk, year, month, day):
"""
Shows an overview of the log for the given date
"""
# Check read permission
plan = get_object_or_404(NutritionPlan, pk=pk)
user = plan.user
is_owner = request.user == user
if not is_owner and not user.userprofile.ro_access:
return HttpResponseForbidden()
try:
date = datetime.date(year=int(year), month=int(month), day=int(day))
except ValueError:
date = datetime.date.today()
        return HttpResponseRedirect(
            reverse(
                'nutrition:log:detail',
                kwargs={
                    'pk': pk,
                    'year': date.year,
                    'month': date.month,
                    'day': date.day
                }
            )
        )
context = {
'plan': plan,
'date': date,
'show_shariff': is_owner,
'is_owner': is_owner,
'log_summary': plan.get_log_summary(date),
'log_entries': plan.get_log_entries(date),
'nutritional_data': plan.get_nutritional_values()
}
return render(request, 'log/detail.html', context)
def log_meal(request, meal_pk):
"""
Copy the requested meal item and logs its nutritional values
"""
# Check read permission
meal = get_object_or_404(Meal, pk=meal_pk)
mealUser = meal.plan.user
is_owner = request.user == mealUser
if not is_owner and not mealUser.userprofile.ro_access:
return HttpResponseForbidden()
_logMealPlan([meal])
date = datetime.date.today()
return HttpResponseRedirect(
reverse(
'nutrition:log:detail',
kwargs={
'pk': meal.plan_id,
'year': date.year,
'month': date.month,
'day': date.day
}
)
)
def log_plan(request, plan_pk):
"""
Copy the requested plan item and log all of the meals within it
"""
plan = get_object_or_404(NutritionPlan, pk=plan_pk)
planUser = plan.user
is_owner = request.user == planUser
if not is_owner and not planUser.userprofile.ro_access:
return HttpResponseForbidden()
_logMealPlan(plan.meal_set.select_related())
return HttpResponseRedirect(reverse('nutrition:log:overview', kwargs={'pk': plan_pk}))
def _logMealPlan(meals):
"""
Helper method to log a collection of meals
"""
for meal in meals:
for item in meal.mealitem_set.select_related():
log_item = LogItem(
plan=item.meal.plan,
meal=meal,
ingredient=item.ingredient,
weight_unit=item.weight_unit,
amount=item.amount
)
log_item.save()
class LogCreateView(WgerFormMixin, CreateView):
"""
Generic view to create a new meal diary entry
"""
model = LogItem
form_class = MealLogItemForm
custom_js = 'wgerInitIngredientAutocompleter();'
plan = None
def dispatch(self, request, *args, **kwargs):
"""
Check that the user owns the meal
"""
plan = get_object_or_404(NutritionPlan, pk=kwargs['plan_pk'])
if plan.user == request.user:
self.plan = plan
return super(LogCreateView, self).dispatch(request, *args, **kwargs)
else:
return HttpResponseForbidden()
def get_success_url(self):
return reverse('nutrition:plan:view', kwargs={'id': self.plan.id})
def get_context_data(self, **kwargs):
"""
Send some additional data to the template
"""
context = super(LogCreateView, self).get_context_data(**kwargs)
context['ingredient_searchfield'] = self.request.POST.get('ingredient_searchfield', '')
return context
def form_valid(self, form):
"""
Manually set the corresponding meal
"""
form.instance.plan = self.plan
return super(LogCreateView, self).form_valid(form)
class LogDeleteView(WgerDeleteMixin, DeleteView, WgerPermissionMixin):
"""
Delete a nutrition diary entry
"""
model = LogItem
title = gettext_lazy('Delete?')
form_action_urlname = 'nutrition:log:delete'
login_required = True
def get_success_url(self):
"""
Return to the nutrition diary detail page
"""
return reverse(
'nutrition:log:detail',
kwargs={
'pk': self.object.plan.pk,
'year': self.object.datetime.year,
'month': self.object.datetime.month,
'day': self.object.datetime.day
}
) |
299,868 | get value | '''
This module contains a distutils extension mechanism for Pythran
* PythranExtension: is used as distutils's Extension
'''
import pythran.config as cfg
from collections import defaultdict
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import os.path
import os
from distutils.command.build_ext import build_ext as LegacyBuildExt
try:
# `numpy.distutils` is deprecated, and won't be present on Python >=3.12
# If it is installed, we need to use it though, so try-import it:
from numpy.distutils.extension import Extension
except ImportError:
from distutils.extension import Extension
class PythranBuildExtMixIn(object):
"""Subclass of `distutils.command.build_ext.build_ext` which is required to
build `PythranExtension` with the configured C++ compiler. It may also be
subclassed if you want to combine with another build_ext class (NumPy,
Cython implementations).
"""
def build_extension(self, ext):
StringTypes = str,
def METHOD_NAME(obj, key):
var = getattr(obj, key)
if isinstance(var, Iterable) and not isinstance(var, StringTypes):
return var[0]
else:
return var
def set_value(obj, key, value):
var = getattr(obj, key)
if isinstance(var, Iterable) and not isinstance(var, StringTypes):
var[0] = value
else:
setattr(obj, key, value)
prev = {
# linux-like
'preprocessor': None,
'compiler_cxx': None,
'compiler_so': None,
'compiler': None,
'linker_exe': None,
'linker_so': None,
# Windows-like
'cc': None,
}
# Backup compiler settings
for key in list(prev.keys()):
if hasattr(self.compiler, key):
prev[key] = METHOD_NAME(self.compiler, key)
else:
del prev[key]
# try hard to modify the compiler
if getattr(ext, 'cxx', None) is not None:
for comp in prev:
if hasattr(self.compiler, comp):
set_value(self.compiler, comp, ext.cxx)
find_exe = None
if getattr(ext, 'cc', None) is not None:
try:
import distutils._msvccompiler as msvc
# install hook
find_exe = msvc._find_exe
def _find_exe(exe, *args, **kwargs):
if exe == 'cl.exe':
exe = ext.cc
return find_exe(exe, *args, **kwargs)
msvc._find_exe = _find_exe
except ImportError:
pass
# In general, distutils uses -Wstrict-prototypes, but this option
# is not valid for C++ code, only for C. Remove it if it's there
# to avoid a spurious warning on every compilation.
for flag in cfg.cfg.get('compiler', "ignoreflags").split():
for target in ('compiler_so', 'linker_so'):
try:
while True:
getattr(self.compiler, target).remove(flag)
except (AttributeError, ValueError):
pass
# Remove -arch i386 if 'x86_64' is specified, otherwise incorrect
# code is generated, at least on OSX
if hasattr(self.compiler, 'compiler_so'):
archs = defaultdict(list)
for i, flag in enumerate(self.compiler.compiler_so[1:]):
if self.compiler.compiler_so[i] == '-arch':
archs[flag].append(i + 1)
if 'x86_64' in archs and 'i386' in archs:
for i in archs['i386']:
self.compiler.compiler_so[i] = 'x86_64'
try:
return super(PythranBuildExtMixIn, self).build_extension(ext)
finally:
# Revert compiler settings
for key in prev.keys():
set_value(self.compiler, key, prev[key])
# uninstall hook
if find_exe is not None:
import distutils._msvccompiler as msvc
msvc._find_exe = find_exe
class PythranBuildExtMeta(type):
def __getitem__(self, base):
class PythranBuildExt(PythranBuildExtMixIn, base):
pass
return PythranBuildExt
class PythranBuildExt(PythranBuildExtMixIn, LegacyBuildExt, metaclass=PythranBuildExtMeta):
pass
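# Illustrative usage sketch (not part of the original module; the module path
# pythran.dist and the names "hello"/"hello.py" are assumptions):
#   from distutils.core import setup
#   from pythran.dist import PythranExtension, PythranBuildExt
#   setup(name="hello",
#         ext_modules=[PythranExtension("hello", sources=["hello.py"])],
#         cmdclass={"build_ext": PythranBuildExt})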
class PythranExtension(Extension):
'''
Description of a Pythran extension
Similar to distutils.core.Extension except that the sources are .py files.
They must be processable by pythran, of course.
The compilation process ends up in a native Python module.
'''
def __init__(self, name, sources, *args, **kwargs):
cfg_ext = cfg.make_extension(python=True, **kwargs)
self.cxx = cfg_ext.pop('cxx', None)
self.cc = cfg_ext.pop('cc', None)
self._sources = sources
Extension.__init__(self, name, sources, *args, **cfg_ext)
self.__dict__.pop("sources", None)
@property
def sources(self):
import pythran.toolchain as tc
cxx_sources = []
for source in self._sources:
base, ext = os.path.splitext(source)
if ext != '.py':
cxx_sources.append(source)
continue
output_file = base + '.cpp' # target name
if os.path.exists(source) and (not os.path.exists(output_file)
or os.path.getmtime(output_file) < os.path.getmtime(source)):
# get the last name in the path
if '.' in self.name:
module_name = os.path.splitext(self.name)[-1][1:]
else:
module_name = self.name
tc.compile_pythranfile(source, output_file,
module_name, cpponly=True)
cxx_sources.append(output_file)
return cxx_sources
@sources.setter
def sources(self, sources):
self._sources = sources |
299,869 | test check config | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""test nb_init links."""
import datetime
import os
import subprocess # nosec
from datetime import timedelta
from enum import Enum
from pathlib import Path
import pandas as pd
import pytest
import pytest_check as check
from msticpy.init import azure_ml_tools, nbinit
from msticpy.init.nbinit import _get_or_create_config, _imp_module_all, init_notebook
from ..unit_test_lib import TEST_DATA_PATH, custom_mp_config
@pytest.mark.filterwarnings("ignore::UserWarning")
def test_nbinit_no_params():
"""Test init_notebook defaults."""
ns_dict = {}
init_notebook(
namespace=ns_dict,
def_imports="nb",
verbose=True,
)
check.is_in("pd", ns_dict)
check.is_in("get_ipython", ns_dict)
check.is_in("Path", ns_dict)
check.is_in("np", ns_dict)
print(ns_dict.keys())
# Note - msticpy imports throw when exec'd from unit test
# e.g. check.is_in("QueryProvider", ns_dict) fails
check.is_in("WIDGET_DEFAULTS", ns_dict)
check.equal(ns_dict["pd"].__name__, "pandas")
check.equal(ns_dict["np"].__name__, "numpy")
check.equal(pd.get_option("display.max_columns"), 50)
def test_nbinit_imports():
"""Test custom imports."""
ns_dict = {}
init_notebook(
namespace=ns_dict,
extra_imports=["pathlib", "datetime, time", "datetime, timedelta, tdelta"],
def_imports="nb",
verbose=True,
)
print(ns_dict.keys())
check.is_in("pathlib", ns_dict)
check.is_in("time", ns_dict)
check.is_in("tdelta", ns_dict)
check.is_in("np", ns_dict)
check.equal(timedelta, ns_dict["tdelta"])
check.equal(datetime.time, ns_dict["time"])
def test_import_all():
"""Test import all function."""
ns_dict = {}
_imp_module_all(ns_dict, module_name="datetime")
for imp in ["date", "datetime", "time", "timedelta", "timezone", "tzinfo"]:
check.is_in(imp, ns_dict)
class SubDirCase(Enum):
"""Test enumeration for config folder."""
NONE = 0
MAIN_ENV_PTR = 1
SAME_DIR = 2
SEARCH = 3
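# Each entry of _CONFIG_TESTS below has the shape
# ((msticpyconfig file, config.json file, folder layout), expected success);
# e.g. the first case uses a missing msticpyconfig, no config.json and no
# special folder layout, and is expected to fail.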
_CONFIG_TESTS = [
(("missing_file", None, SubDirCase.NONE), False),
(
("msticpyconfig.yaml", None, SubDirCase.MAIN_ENV_PTR),
True,
),
(
(
"msticpyconfig-noAzSentSettings.yaml",
None,
SubDirCase.MAIN_ENV_PTR,
),
False,
),
(
("msticpyconfig-no-settings.yaml", None, SubDirCase.MAIN_ENV_PTR),
False,
),
(
("msticpyconfig.yaml", None, SubDirCase.SAME_DIR),
True,
),
(
("msticpyconfig-noAzSentSettings.yaml", None, SubDirCase.SAME_DIR),
False,
),
(
("msticpyconfig-no-settings.yaml", None, SubDirCase.SAME_DIR),
False,
),
(
(None, "config.json", SubDirCase.SAME_DIR),
True,
),
(
(None, "config.json", SubDirCase.SEARCH),
True,
),
(
("msticpyconfig.yaml", None, SubDirCase.SEARCH),
False,
),
(
("msticpyconfig-no-settings.yaml", None, SubDirCase.SEARCH),
False,
),
(
(
"msticpyconfig-noAzSentSettings.yaml",
"config.json",
SubDirCase.MAIN_ENV_PTR,
),
True,
),
(
("msticpyconfig-no-settings.yaml", "config.json", SubDirCase.MAIN_ENV_PTR),
True,
),
(
(
"msticpyconfig-noAzSentSettings.yaml",
"config.json",
SubDirCase.SAME_DIR,
),
True,
),
(
("msticpyconfig-no-settings.yaml", "config.json", SubDirCase.SAME_DIR),
True,
),
(
(
"msticpyconfig-noAzSentSettings.yaml",
"config.json",
SubDirCase.SEARCH,
),
True,
),
(
("msticpyconfig-no-settings.yaml", "config.json", SubDirCase.SEARCH),
True,
),
]
_test_ids = [
f"{test[0][0]}/{test[0][1]}-{test[0][2].name} => {'Success' if test[1] else 'Fail'}"
for test in _CONFIG_TESTS
]
@pytest.mark.filterwarnings("ignore::UserWarning")
@pytest.mark.parametrize("conf_file, expected", _CONFIG_TESTS, ids=_test_ids)
def METHOD_NAME(conf_file, expected, tmp_path, monkeypatch):
"""Test config check."""
mpconf_file, conf_json, mp_location = conf_file
init_cwd = str(Path(".").absolute())
settings_file = "missing_file"
for file in tmp_path.parent.glob("config.json"):
file.unlink()
for file in tmp_path.parent.glob("msticpyconfig.yaml"):
file.unlink()
try:
# If we want to test against config files in an isolated directory
if mp_location != SubDirCase.NONE:
# Read contents of source file
for file in (mpconf_file, conf_json):
if file is None:
continue
tgt_file = Path(TEST_DATA_PATH).joinpath(file).name
file_txt = (
Path(TEST_DATA_PATH).joinpath(file).read_text(encoding="utf-8")
)
dest_file = (
"config.json"
if tgt_file.endswith(".json")
else "msticpyconfig.yaml"
)
# write the file to the folder
tmp_path.joinpath(dest_file).write_text(file_txt)
cwd_path = str(tmp_path)
# If sub-dir, change to the directory, so WorkspaceConfig has to search.
if mp_location in (SubDirCase.MAIN_ENV_PTR, SubDirCase.SEARCH):
cwd_path = tmp_path.joinpath("sub_folder")
cwd_path.mkdir(parents=True, exist_ok=True)
os.chdir(str(cwd_path))
if mp_location == SubDirCase.SEARCH or mpconf_file is None:
# Pass non-existing file to custom_mp_config to bypass default settings
settings_file = "missing_file"
else:
settings_file = tmp_path.joinpath("msticpyconfig.yaml")
else:
os.chdir(str(tmp_path))
# with custom_mp_config(settings_file, path_check=False):
monkeypatch.setenv("MSTICPYCONFIG", str(settings_file))
monkeypatch.setattr(nbinit, "current_config_path", lambda: None)
monkeypatch.setattr(nbinit, "is_in_aml", lambda: True)
monkeypatch.setattr(azure_ml_tools, "get_aml_user_folder", lambda: tmp_path)
result = _get_or_create_config()
print("result=", result)
check.equal(result, expected, "Result")
finally:
os.chdir(init_cwd)
@pytest.mark.skipif(
not os.environ.get("MSTICPY_TEST_NOSKIP"), reason="Skipped for local tests."
)
def test_install_pkgs():
"""Test installing and importing a package."""
test_pkg = "pip_install_test"
test_imp = "pip_install_test, , test_pkg_import"
# Uninstall package if it is already there
subprocess.run(["pip", "uninstall", "-y", test_pkg], check=True) # nosec
ns_dict = {}
init_notebook(
namespace=ns_dict,
additional_packages=[test_pkg],
def_imports="nb",
extra_imports=test_imp,
verbose=True,
)
for name, obj in ns_dict.items():
print(name, type(obj))
check.is_in("test_pkg_import", ns_dict)
print(ns_dict)
subprocess.run(["pip", "uninstall", "-y", test_pkg], check=True) # nosec |
299,870 | checkpassword | # This file is part of CherryPy <http://www.cherrypy.org/>
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:expandtab:fileencoding=utf-8
"""HTTP Basic Authentication tool.
This module provides a CherryPy 3.x tool which implements
the server-side of HTTP Basic Access Authentication, as described in
:rfc:`2617`.
Example usage, using the built-in checkpassword_dict function which uses a dict
as the credentials store::
userpassdict = {'bird' : 'bebop', 'ornette' : 'wayout'}
checkpassword = cherrypy.lib.auth_basic.checkpassword_dict(userpassdict)
basic_auth = {'tools.auth_basic.on': True,
'tools.auth_basic.realm': 'earth',
'tools.auth_basic.checkpassword': checkpassword,
'tools.auth_basic.accept_charset': 'UTF-8',
}
app_config = { '/' : basic_auth }
"""
import binascii
import unicodedata
import base64
import cherrypy
from cherrypy._cpcompat import ntou, tonative
__author__ = 'visteya'
__date__ = 'April 2009'
def checkpassword_dict(user_password_dict):
"""Returns a checkpassword function which checks credentials
against a dictionary of the form: {username : password}.
If you want a simple dictionary-based authentication scheme, use
checkpassword_dict(my_credentials_dict) as the value for the
checkpassword argument to basic_auth().
"""
def METHOD_NAME(realm, user, password):
p = user_password_dict.get(user)
return p and p == password or False
return METHOD_NAME
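# Illustrative behaviour sketch (not part of the original module):
#   check = checkpassword_dict({'bird': 'bebop'})
#   check('earth', 'bird', 'bebop')  # -> True
#   check('earth', 'bird', 'wrong')  # -> False (as is any unknown user)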
def basic_auth(realm, METHOD_NAME, debug=False, accept_charset='utf-8'):
"""A CherryPy tool which hooks at before_handler to perform
HTTP Basic Access Authentication, as specified in :rfc:`2617`
and :rfc:`7617`.
If the request has an 'authorization' header with a 'Basic' scheme, this
tool attempts to authenticate the credentials supplied in that header. If
the request has no 'authorization' header, or if it does but the scheme is
not 'Basic', or if authentication fails, the tool sends a 401 response with
a 'WWW-Authenticate' Basic header.
realm
A string containing the authentication realm.
checkpassword
A callable which checks the authentication credentials.
Its signature is checkpassword(realm, username, password). where
username and password are the values obtained from the request's
'authorization' header. If authentication succeeds, checkpassword
returns True, else it returns False.
"""
fallback_charset = 'ISO-8859-1'
if '"' in realm:
raise ValueError('Realm cannot contain the " (quote) character.')
request = cherrypy.serving.request
auth_header = request.headers.get('authorization')
if auth_header is not None:
# split() error, base64.b64decode() error
msg = 'Bad Request'
with cherrypy.HTTPError.handle((ValueError, binascii.Error), 400, msg):
scheme, params = auth_header.split(' ', 1)
if scheme.lower() == 'basic':
charsets = accept_charset, fallback_charset
decoded_params = base64.b64decode(params.encode('ascii'))
decoded_params = _try_decode(decoded_params, charsets)
decoded_params = ntou(decoded_params)
decoded_params = unicodedata.normalize('NFC', decoded_params)
decoded_params = tonative(decoded_params)
username, password = decoded_params.split(':', 1)
if METHOD_NAME(realm, username, password):
if debug:
cherrypy.log('Auth succeeded', 'TOOLS.AUTH_BASIC')
request.login = username
return # successful authentication
charset = accept_charset.upper()
charset_declaration = (
(', charset="%s"' % charset)
if charset != fallback_charset
else ''
)
# Respond with 401 status and a WWW-Authenticate header
cherrypy.serving.response.headers['www-authenticate'] = (
'Basic realm="%s"%s' % (realm, charset_declaration)
)
raise cherrypy.HTTPError(
401, 'You are not authorized to access that resource')
def _try_decode(subject, charsets):
for charset in charsets[:-1]:
try:
return tonative(subject, charset)
except ValueError:
pass
return tonative(subject, charsets[-1]) |
299,871 | extract urls | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
extract_attributes,
int_or_none,
mimetype2ext,
parse_iso8601,
)
class MedialaanIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://
(?:
(?:embed\.)?mychannels.video/embed/|
embed\.mychannels\.video/(?:s(?:dk|cript)/)?production/|
(?:www\.)?(?:
(?:
7sur7|
demorgen|
hln|
joe|
qmusic
)\.be|
(?:
[abe]d|
bndestem|
destentor|
gelderlander|
pzc|
tubantia|
volkskrant
)\.nl
)/video/(?:[^/]+/)*[^/?&#]+~p
)
(?P<id>\d+)
'''
_TESTS = [{
'url': 'https://www.bndestem.nl/video/de-terugkeer-van-ally-de-aap-en-wie-vertrekt-er-nog-bij-nac~p193993',
'info_dict': {
'id': '193993',
'ext': 'mp4',
'title': 'De terugkeer van Ally de Aap en wie vertrekt er nog bij NAC?',
'timestamp': 1611663540,
'upload_date': '20210126',
'duration': 238,
},
'params': {
'skip_download': True,
},
}, {
'url': 'https://www.gelderlander.nl/video/kanalen/degelderlander~c320/series/snel-nieuws~s984/noodbevel-in-doetinchem-politie-stuurt-mensen-centrum-uit~p194093',
'only_matching': True,
}, {
'url': 'https://embed.mychannels.video/sdk/production/193993?options=TFTFF_default',
'only_matching': True,
}, {
'url': 'https://embed.mychannels.video/script/production/193993',
'only_matching': True,
}, {
'url': 'https://embed.mychannels.video/production/193993',
'only_matching': True,
}, {
'url': 'https://mychannels.video/embed/193993',
'only_matching': True,
}, {
'url': 'https://embed.mychannels.video/embed/193993',
'only_matching': True,
}]
@staticmethod
def METHOD_NAME(webpage):
entries = []
for element in re.findall(r'(<div[^>]+data-mychannels-type="video"[^>]*>)', webpage):
mychannels_id = extract_attributes(element).get('data-mychannels-id')
if mychannels_id:
entries.append('https://mychannels.video/embed/' + mychannels_id)
return entries
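# Sketch of the scan above (the markup below is an illustrative assumption):
#   <div data-mychannels-type="video" data-mychannels-id="193993"></div>
#   would be collected as 'https://mychannels.video/embed/193993'.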
def _real_extract(self, url):
production_id = self._match_id(url)
production = self._download_json(
'https://embed.mychannels.video/sdk/production/' + production_id,
production_id, query={'options': 'UUUU_default'})['productions'][0]
title = production['title']
formats = []
for source in (production.get('sources') or []):
src = source.get('src')
if not src:
continue
ext = mimetype2ext(source.get('type'))
if ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
src, production_id, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False))
else:
formats.append({
'ext': ext,
'url': src,
})
self._sort_formats(formats)
return {
'id': production_id,
'title': title,
'formats': formats,
'thumbnail': production.get('posterUrl'),
'timestamp': parse_iso8601(production.get('publicationDate'), ' '),
'duration': int_or_none(production.get('duration')) or None,
} |
299,872 | test user sync is disabled by default | import uuid
from unittest.mock import MagicMock, patch
from django.test import SimpleTestCase
from dimagi.utils.couch.undo import DELETED_SUFFIX
from corehq.apps.es.client import manager
from corehq.apps.es.tests.utils import es_test
from corehq.apps.es.users import user_adapter
from corehq.apps.reports.analytics.esaccessors import get_user_stubs
from corehq.util.es.testing import sync_users_to_es
from corehq.util.test_utils import mock_out_couch
from ..models import CommCareUser, WebUser
from ..signals import update_user_in_es
# Note that you can't directly patch the signal handler, as that code has
# already been called. It's easier to patch something that the handler calls.
# Also, you need to patch the path to the function in the file where the signal
# handler uses it, not where it's actually defined. That's quite a gotcha.
@mock_out_couch()
@patch('corehq.apps.sms.tasks.sync_user_phone_numbers', new=MagicMock())
@patch('corehq.apps.users.models.CouchUser.sync_to_django_user', new=MagicMock())
@patch('corehq.apps.users.models.CommCareUser.project', new=MagicMock())
@es_test
class TestUserSignals(SimpleTestCase):
@patch('corehq.apps.analytics.signals.update_hubspot_properties.delay')
@patch('corehq.apps.callcenter.tasks.sync_usercases')
@patch('corehq.apps.cachehq.signals.invalidate_document')
@patch('corehq.apps.users.signals._update_user_in_es')
def test_commcareuser_save(self, send_to_es, invalidate, sync_usercases,
update_hubspot_properties):
CommCareUser(username='test').save()
self.assertTrue(send_to_es.called)
self.assertTrue(invalidate.called)
self.assertTrue(sync_usercases.called)
self.assertFalse(update_hubspot_properties.called)
@patch('corehq.apps.analytics.signals.update_hubspot_properties.delay')
@patch('corehq.apps.callcenter.tasks.sync_usercases')
@patch('corehq.apps.cachehq.signals.invalidate_document')
@patch('corehq.apps.users.signals._update_user_in_es')
def test_webuser_save(self, send_to_es, invalidate, sync_usercases,
update_hubspot_properties):
WebUser().save()
self.assertTrue(send_to_es.called)
self.assertTrue(invalidate.called)
self.assertFalse(sync_usercases.called)
self.assertTrue(update_hubspot_properties.called)
@mock_out_couch()
@patch('corehq.apps.users.models.CouchUser.sync_to_django_user', new=MagicMock)
@patch('corehq.apps.analytics.signals.update_hubspot_properties')
@patch('corehq.apps.callcenter.tasks.sync_usercases')
@patch('corehq.apps.cachehq.signals.invalidate_document')
@es_test(requires=[user_adapter], setup_class=True)
class TestUserSyncToEs(SimpleTestCase):
@sync_users_to_es()
def test_sync_to_es_create_update_delete(self, *mocks):
domain = 'user_es_domain'
user = CommCareUser(
domain=domain,
username='user1',
_id=uuid.uuid4().hex,
is_active=True,
first_name='user1 first name',
last_name='user1 last name',
location_id='location1'
)
user.save()
self.check_user(user)
user.first_name = 'new first name'
user.save()
self.check_user(user)
# simulate retire without needing couch
user.base_doc += DELETED_SUFFIX
user.save()
manager.index_refresh(user_adapter.index_name)
self.assertFalse(user_adapter.exists(user._id))
def check_user(self, user):
manager.index_refresh(user_adapter.index_name)
results = get_user_stubs([user._id])
self.assertEqual(len(results), 1)
self.assertEqual(results[0], {
'_id': user._id,
'domain': user.domain,
'username': user.username,
'is_active': True,
'first_name': user.first_name,
'last_name': user.last_name,
'doc_type': user.doc_type,
'location_id': 'location1',
'__group_ids': []
})
@es_test(requires=[user_adapter])
class TestElasticSyncPatch(SimpleTestCase):
class MockUser:
user_id = "ab12"
def to_be_deleted(self):
return False
def to_json(self):
return {"_id": self.user_id, "username": "test"}
def METHOD_NAME(self):
user = self.MockUser()
self.assertFalse(user_adapter.exists(user.user_id))
update_user_in_es(None, user)
self.assertFalse(user_adapter.exists(user.user_id))
@sync_users_to_es()
def test_user_sync_is_enabled_with_decorator(self):
def simple_doc(user):
user_json = user.to_json()
return (user_json.pop('_id'), user_json)
user = self.MockUser()
self.assertFalse(user_adapter.exists(user.user_id))
with patch.object(user_adapter, 'from_python', simple_doc):
update_user_in_es(None, user)
self.assertTrue(user_adapter.exists(user.user_id)) |
299,873 | rhtml rule10 | # Leo colorizer control file for rhtml mode.
# This file is in the public domain.
# Properties for rhtml mode.
properties = {
"commentEnd": "%>",
"commentStart": "<%#",
}
# Attributes dict for rhtml_main ruleset.
rhtml_main_attributes_dict = {
"default": "null",
"digit_re": "",
"escape": "",
"highlight_digits": "true",
"ignore_case": "true",
"no_word_sep": "",
}
# Attributes dict for rhtml_tags ruleset.
rhtml_tags_attributes_dict = {
"default": "MARKUP",
"digit_re": "",
"escape": "",
"highlight_digits": "true",
"ignore_case": "true",
"no_word_sep": "",
}
# Attributes dict for rhtml_tags_literal ruleset.
rhtml_tags_literal_attributes_dict = {
"default": "LITERAL1",
"digit_re": "",
"escape": "",
"highlight_digits": "true",
"ignore_case": "true",
"no_word_sep": "",
}
# Dictionary of attributes dictionaries for rhtml mode.
attributesDictDict = {
"rhtml_main": rhtml_main_attributes_dict,
"rhtml_tags": rhtml_tags_attributes_dict,
"rhtml_tags_literal": rhtml_tags_literal_attributes_dict,
}
# Keywords dict for rhtml_main ruleset.
rhtml_main_keywords_dict = {}
# Keywords dict for rhtml_tags ruleset.
rhtml_tags_keywords_dict = {}
# Keywords dict for rhtml_tags_literal ruleset.
rhtml_tags_literal_keywords_dict = {}
# Dictionary of keywords dictionaries for rhtml mode.
keywordsDictDict = {
"rhtml_main": rhtml_main_keywords_dict,
"rhtml_tags": rhtml_tags_keywords_dict,
"rhtml_tags_literal": rhtml_tags_literal_keywords_dict,
}
# Rules for rhtml_main ruleset.
def rhtml_rule0(colorer, s, i):
return colorer.match_span(s, i, kind="comment1", begin="<%#", end="%>",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="", exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def rhtml_rule1(colorer, s, i):
return colorer.match_span(s, i, kind="markup", begin="<%=", end="%>",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="ruby::main", exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def rhtml_rule2(colorer, s, i):
return colorer.match_span(s, i, kind="markup", begin="<%", end="%>",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="ruby::main", exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def rhtml_rule3(colorer, s, i):
return colorer.match_span(s, i, kind="comment1", begin="<!--", end="-->",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="", exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def rhtml_rule4(colorer, s, i):
return colorer.match_span(s, i, kind="markup", begin="<SCRIPT", end="</SCRIPT>",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="html::javascript", exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def rhtml_rule5(colorer, s, i):
return colorer.match_span(s, i, kind="markup", begin="<STYLE", end="</STYLE>",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="html::css", exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def rhtml_rule6(colorer, s, i):
return colorer.match_span(s, i, kind="keyword2", begin="<!", end=">",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="xml::dtd-tags", exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def rhtml_rule7(colorer, s, i):
return colorer.match_span(s, i, kind="markup", begin="<", end=">",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="rhtml::tags", exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def rhtml_rule8(colorer, s, i):
return colorer.match_span(s, i, kind="literal2", begin="&", end=";",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="", exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=True)
# Rules dict for rhtml_main ruleset.
rulesDict1 = {
"&": [rhtml_rule8,],
"<": [rhtml_rule0, rhtml_rule1, rhtml_rule2, rhtml_rule3, rhtml_rule4, rhtml_rule5, rhtml_rule6, rhtml_rule7,],
}
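# Descriptive note (based on the structure above): each key of rulesDict1 (and
# of the other rulesDicts below) is the character at which the listed rules can
# start matching; e.g. '&' in the main ruleset maps only to rhtml_rule8, while
# '<' is tried against rhtml_rule0..rhtml_rule7.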
# Rules for rhtml_tags ruleset.
def rhtml_rule9(colorer, s, i):
return colorer.match_span(s, i, kind="comment1", begin="<!--", end="-->",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="", exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def METHOD_NAME(colorer, s, i):
return colorer.match_span(s, i, kind="comment1", begin="<%#", end="%>",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="", exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def rhtml_rule11(colorer, s, i):
return colorer.match_span(s, i, kind="literal1", begin="\"", end="\"",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="rhtml::tags_literal", exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def rhtml_rule12(colorer, s, i):
return colorer.match_span(s, i, kind="literal1", begin="'", end="'",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="rhtml::tags_literal", exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def rhtml_rule13(colorer, s, i):
return colorer.match_seq(s, i, kind="operator", seq="=",
at_line_start=False, at_whitespace_end=False, at_word_start=False, delegate="")
# Rules dict for rhtml_tags ruleset.
rulesDict2 = {
"\"": [rhtml_rule11,],
"'": [rhtml_rule12,],
"<": [rhtml_rule9, METHOD_NAME,],
"=": [rhtml_rule13,],
}
# Rules for rhtml_tags_literal ruleset.
def rhtml_rule14(colorer, s, i):
return colorer.match_span(s, i, kind="markup", begin="<%", end="%>",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="", exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
def rhtml_rule15(colorer, s, i):
return colorer.match_span(s, i, kind="markup", begin="<%=", end="%>",
at_line_start=False, at_whitespace_end=False, at_word_start=False,
delegate="", exclude_match=False,
no_escape=False, no_line_break=False, no_word_break=False)
# Rules dict for rhtml_tags_literal ruleset.
rulesDict3 = {
"<": [rhtml_rule14, rhtml_rule15,],
}
# x.rulesDictDict for rhtml mode.
rulesDictDict = {
"rhtml_main": rulesDict1,
"rhtml_tags": rulesDict2,
"rhtml_tags_literal": rulesDict3,
}
# Import dict for rhtml mode.
importDict = {} |
299,874 | test revoke o auth2 token | """
Ory APIs
Documentation for all public and administrative Ory APIs. Administrative APIs can only be accessed with a valid Personal Access Token. Public APIs are mostly used in browsers. # noqa: E501
The version of the OpenAPI document: v1.2.1
Contact: support@ory.sh
Generated by: https://openapi-generator.tech
"""
import unittest
import ory_client
from ory_client.api.o_auth2_api import OAuth2Api # noqa: E501
class TestOAuth2Api(unittest.TestCase):
"""OAuth2Api unit test stubs"""
def setUp(self):
self.api = OAuth2Api() # noqa: E501
def tearDown(self):
pass
def test_accept_o_auth2_consent_request(self):
"""Test case for accept_o_auth2_consent_request
Accept OAuth 2.0 Consent Request # noqa: E501
"""
pass
def test_accept_o_auth2_login_request(self):
"""Test case for accept_o_auth2_login_request
Accept OAuth 2.0 Login Request # noqa: E501
"""
pass
def test_accept_o_auth2_logout_request(self):
"""Test case for accept_o_auth2_logout_request
Accept OAuth 2.0 Session Logout Request # noqa: E501
"""
pass
def test_create_o_auth2_client(self):
"""Test case for create_o_auth2_client
Create OAuth 2.0 Client # noqa: E501
"""
pass
def test_delete_o_auth2_client(self):
"""Test case for delete_o_auth2_client
Delete OAuth 2.0 Client # noqa: E501
"""
pass
def test_delete_o_auth2_token(self):
"""Test case for delete_o_auth2_token
Delete OAuth 2.0 Access Tokens from specific OAuth 2.0 Client # noqa: E501
"""
pass
def test_delete_trusted_o_auth2_jwt_grant_issuer(self):
"""Test case for delete_trusted_o_auth2_jwt_grant_issuer
Delete Trusted OAuth2 JWT Bearer Grant Type Issuer # noqa: E501
"""
pass
def test_get_o_auth2_client(self):
"""Test case for get_o_auth2_client
Get an OAuth 2.0 Client # noqa: E501
"""
pass
def test_get_o_auth2_consent_request(self):
"""Test case for get_o_auth2_consent_request
Get OAuth 2.0 Consent Request # noqa: E501
"""
pass
def test_get_o_auth2_login_request(self):
"""Test case for get_o_auth2_login_request
Get OAuth 2.0 Login Request # noqa: E501
"""
pass
def test_get_o_auth2_logout_request(self):
"""Test case for get_o_auth2_logout_request
Get OAuth 2.0 Session Logout Request # noqa: E501
"""
pass
def test_get_trusted_o_auth2_jwt_grant_issuer(self):
"""Test case for get_trusted_o_auth2_jwt_grant_issuer
Get Trusted OAuth2 JWT Bearer Grant Type Issuer # noqa: E501
"""
pass
def test_introspect_o_auth2_token(self):
"""Test case for introspect_o_auth2_token
Introspect OAuth2 Access and Refresh Tokens # noqa: E501
"""
pass
def test_list_o_auth2_clients(self):
"""Test case for list_o_auth2_clients
List OAuth 2.0 Clients # noqa: E501
"""
pass
def test_list_o_auth2_consent_sessions(self):
"""Test case for list_o_auth2_consent_sessions
List OAuth 2.0 Consent Sessions of a Subject # noqa: E501
"""
pass
def test_list_trusted_o_auth2_jwt_grant_issuers(self):
"""Test case for list_trusted_o_auth2_jwt_grant_issuers
List Trusted OAuth2 JWT Bearer Grant Type Issuers # noqa: E501
"""
pass
def test_o_auth2_authorize(self):
"""Test case for o_auth2_authorize
OAuth 2.0 Authorize Endpoint # noqa: E501
"""
pass
def test_oauth2_token_exchange(self):
"""Test case for oauth2_token_exchange
The OAuth 2.0 Token Endpoint # noqa: E501
"""
pass
def test_patch_o_auth2_client(self):
"""Test case for patch_o_auth2_client
Patch OAuth 2.0 Client # noqa: E501
"""
pass
def test_reject_o_auth2_consent_request(self):
"""Test case for reject_o_auth2_consent_request
Reject OAuth 2.0 Consent Request # noqa: E501
"""
pass
def test_reject_o_auth2_login_request(self):
"""Test case for reject_o_auth2_login_request
Reject OAuth 2.0 Login Request # noqa: E501
"""
pass
def test_reject_o_auth2_logout_request(self):
"""Test case for reject_o_auth2_logout_request
Reject OAuth 2.0 Session Logout Request # noqa: E501
"""
pass
def test_revoke_o_auth2_consent_sessions(self):
"""Test case for revoke_o_auth2_consent_sessions
Revoke OAuth 2.0 Consent Sessions of a Subject # noqa: E501
"""
pass
def test_revoke_o_auth2_login_sessions(self):
"""Test case for revoke_o_auth2_login_sessions
Revokes OAuth 2.0 Login Sessions by either a Subject or a SessionID # noqa: E501
"""
pass
def METHOD_NAME(self):
"""Test case for revoke_o_auth2_token
Revoke OAuth 2.0 Access or Refresh Token # noqa: E501
"""
pass
def test_set_o_auth2_client(self):
"""Test case for set_o_auth2_client
Set OAuth 2.0 Client # noqa: E501
"""
pass
def test_set_o_auth2_client_lifespans(self):
"""Test case for set_o_auth2_client_lifespans
Set OAuth2 Client Token Lifespans # noqa: E501
"""
pass
def test_trust_o_auth2_jwt_grant_issuer(self):
"""Test case for trust_o_auth2_jwt_grant_issuer
Trust OAuth2 JWT Bearer Grant Type Issuer # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main() |
299,875 | create instance | from pprint import pprint
import hashlib
import time
from owlready2 import get_ontology, DataProperty, ObjectProperty
class OntoAgentGenerator:
def __init__(self, agent_name):
# get the tbox of ontoagent
ontoagent_tbox = 'http://www.theworldavatar.com/ontology/ontoagent/MSM.owl'
onto = get_ontology(ontoagent_tbox).load()
self.ontoagent = onto
self.name = agent_name
self.this_agent = get_ontology('http://www.theworldavatar.com/kb/agents/Service__%s.owl#' % self.name)
self.qualifiers_dict = {} # make a name -> object dict for qualifiers
with self.ontoagent:
class hasNerLabel(DataProperty):
domain = [self.ontoagent.MessagePart]
range = [str]
class hasQuestionTemplates(DataProperty):
domain = [self.ontoagent.Operation]
range = [str]
class hasQualifier(ObjectProperty):
domain = [self.ontoagent.MessagePart]
range = [self.ontoagent.MessagePart]
# check the integrity of the core classes and attributes
pprint(self.ontoagent.Service)
pprint(self.ontoagent.Operation) # http://www.theworldavatar.com/kb/agents/Service__DFT.owl#
pprint(self.ontoagent.MessagePart)
pprint(self.ontoagent.MessageContent)
pprint(self.ontoagent.hasOutput)
pprint(self.ontoagent.hasInput)
pprint(self.ontoagent.hasHttpUrl)
pprint(self.ontoagent.isArray)
pprint(self.ontoagent.hasName)
pprint(self.ontoagent.hasType)
pprint(self.ontoagent.hasOperation)
pprint(self.ontoagent.hasMandatoryPart)
pprint(self.ontoagent.hasNerLabel)
pprint(self.ontoagent.hasQuestionTemplates)
pprint(self.ontoagent.hasQualifier)
self.ontoagent.save('OntoAgent.owl', format='rdfxml')
# create the node id with hash
def generate_id(self, mode, extra_info=''):
content = (self.name + mode + extra_info).encode('utf-8')
hash_object = hashlib.sha1(content)
hex_dig = hash_object.hexdigest()
return mode + '_' + str(hex_dig)
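# Example (derived from the code above): for agent 'PCE_Agent',
# generate_id('Operation') returns 'Operation_' followed by the 40-character
# SHA-1 hex digest of 'PCE_AgentOperation'.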
def create_a_message_part(self, parameters):
# data_name, data_type, is_array,
data_name = parameters['data_name']
data_type = parameters['data_type']
is_array = parameters['is_array']
ner_label = parameters['ner_label']
message_part = self.ontoagent.MessagePart(self.generate_id('MessagePart',
extra_info=data_name), namespace=self.this_agent)
# 7. the most important part, declare its type, name, whether it is an array
message_part.hasType.append(data_type)
message_part.hasName.append(data_name)
message_part.isArray.append(is_array)
message_part.hasNerLabel.append(ner_label)
return message_part
def attach_input_output(self, operation, parameters, mode):
data_name = parameters['data_name']
# 4. create MessageContent
message_content = self.ontoagent.MessageContent(self.generate_id('MessageContent', extra_info=data_name),
namespace=self.this_agent)
# 5. attach the MessageContent to the operation
if mode == 'input':
operation.hasInput.append(message_content)
else:
operation.hasOutput.append(message_content)
message_part = self.create_a_message_part(parameters)
# 8. connect MessagePart to MessageContent
message_content.hasMandatoryPart.append(message_part)
if 'has_qualifier' in parameters:
qualifier_name_list = parameters['has_qualifier']
qualifiers_list = []
for qualifier_name in qualifier_name_list:
if qualifier_name in self.qualifiers_dict:
qualifier_object = self.qualifiers_dict[qualifier_name]
qualifier_message_part = self.create_a_message_part(qualifier_object)
qualifiers_list.append(qualifier_message_part)
message_part.hasQualifier = qualifiers_list
def get_qualifiers(self, agent_object):
if 'qualifiers' in agent_object:
qualifiers = agent_object['qualifiers']
for qualifier in qualifiers:
data_name = qualifier['data_name']
self.qualifiers_dict[data_name] = qualifier
def METHOD_NAME(self, agent_object):
# 1. create service
service = self.ontoagent.Service(self.generate_id('Service'), namespace=self.this_agent)
# 2. create operation, attach the operation to the service
operation = self.ontoagent.Operation(self.generate_id('Operation'), namespace=self.this_agent)
service.hasOperation.append(operation)
# 10. create the nodes for qualifiers, as message parts
self.get_qualifiers(agent_object)
pprint(self.qualifiers_dict)
# 3. give the operation a url
http_url = agent_object['http_url']
operation.hasHttpUrl.append(http_url)
# 9. attach the input/output
inputs = agent_object['inputs']
for input in inputs:
self.attach_input_output(operation, input, 'input')
outputs = agent_object['outputs']
for output in outputs:
self.attach_input_output(operation, output, 'output')
question_templates = agent_object['question_templates']
# 11. add questions templates to operation node
operation.hasQuestionTemplates = question_templates
if __name__ == '__main__':
agent = {
"question_templates": ['[%s](attribute) [%s](species)', '[%s](attribute) of [%s](species)'],
"http_url": "http://somewhereincmcl.com/pce",
"outputs": [
{
"data_name": "power conversion efficiency",
"data_type": "http://fake_concept_for_power_conversion_efficiency",
"is_array": False,
"ner_label": "attribute"
}
],
"inputs": [
{
"data_name": "species",
"data_type": "http://fake_concept_for_species",
"is_array": False,
"ner_label": "species"
}
]
}
og = OntoAgentGenerator('PCE_Agent')
og.METHOD_NAME(agent)
og.this_agent.save('test', format='rdfxml') |
299,876 | write string | #!/usr/bin/python3
import argparse
import glob
import os
import time
import random
COLOURS = (b'\xFF\x00\x00', b'\x00\xFF\x00', b'\x00\x00\xFF',
b'\xFF\xFF\x00', b'\xFF\x00\xFF', b'\x00\xFF\xFF')
def write_binary(driver_path, device_file, payload):
with open(os.path.join(driver_path, device_file), 'wb') as open_file:
open_file.write(payload)
def read_string(driver_path, device_file):
with open(os.path.join(driver_path, device_file), 'r') as open_file:
return open_file.read().rstrip('\n')
def METHOD_NAME(driver_path, device_file, payload):
with open(os.path.join(driver_path, device_file), 'w') as open_file:
open_file.write(payload)
def find_devices(vid, pid):
driver_paths = glob.glob(os.path.join(
'/sys/bus/hid/drivers/razeraccessory', '*:{0:04X}:{1:04X}.*'.format(vid, pid)))
for driver_path in driver_paths:
device_type_path = os.path.join(driver_path, 'device_type')
if os.path.exists(device_type_path):
yield driver_path
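# Example (derived from the format string above): find_devices(0x1532, 0x0C02)
# globs '/sys/bus/hid/drivers/razeraccessory/*:1532:0C02.*', i.e. the
# Goliathus vendor/product ids used in __main__ below.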
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--skip-standard', action='store_true')
parser.add_argument('--skip-custom', action='store_true')
parser.add_argument('--skip-game-led', action='store_true')
parser.add_argument('--skip-macro-led', action='store_true')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
found_chroma = False
for index, driver_path in enumerate(find_devices(0x1532, 0x0C02), start=1):
found_chroma = True
print("Razer Goliathus {0}\n".format(index))
print("Driver version: {0}".format(
read_string(driver_path, 'version')))
print("Driver firmware version: {0}".format(
read_string(driver_path, 'firmware_version')))
print("Device serial: {0}".format(
read_string(driver_path, 'device_serial')))
print("Device type: {0}".format(
read_string(driver_path, 'device_type')))
print("Device mode: {0}".format(
read_string(driver_path, 'device_mode')))
# Set to static red so that we have something standard
write_binary(driver_path, 'matrix_effect_static', b'\xFF\x00\x00')
if not args.skip_standard:
print("Starting brightness test. Press enter to begin.")
input()
print("Max brightness...", end='')
METHOD_NAME(driver_path, 'matrix_brightness', '255')
time.sleep(1)
print("brightness ({0})".format(
read_string(driver_path, 'matrix_brightness')))
time.sleep(1)
print("Half brightness...", end='')
METHOD_NAME(driver_path, 'matrix_brightness', '128')
time.sleep(1)
print("brightness ({0})".format(
read_string(driver_path, 'matrix_brightness')))
time.sleep(1)
print("Zero brightness...", end='')
METHOD_NAME(driver_path, 'matrix_brightness', '0')
time.sleep(1)
print("brightness ({0})".format(
read_string(driver_path, 'matrix_brightness')))
time.sleep(1)
METHOD_NAME(driver_path, 'matrix_brightness', '255')
print("Starting reactive tests. Press enter to begin.")
input()
print("Reactive blue")
write_binary(driver_path, 'matrix_effect_reactive', b'\x01\x00\x00\xFF')
time.sleep(2)
print("Trigger reactive")
METHOD_NAME(driver_path, 'matrix_reactive_trigger', '1')
time.sleep(2)
print("Trigger reactive")
METHOD_NAME(driver_path, 'matrix_reactive_trigger', '1')
time.sleep(2)
print("Trigger reactive")
METHOD_NAME(driver_path, 'matrix_reactive_trigger', '1')
time.sleep(2)
print("Trigger reactive")
METHOD_NAME(driver_path, 'matrix_reactive_trigger', '1')
time.sleep(2)
print("Trigger reactive")
METHOD_NAME(driver_path, 'matrix_reactive_trigger', '1')
print("Starting other colour effect tests. Press enter to begin.")
input()
print("Green Static")
write_binary(driver_path, 'matrix_effect_static', b'\x00\xFF\x00')
time.sleep(5)
print("Cyan Static")
write_binary(driver_path, 'matrix_effect_static', b'\x00\xFF\xFF')
time.sleep(5)
print("Spectrum")
write_binary(driver_path, 'matrix_effect_spectrum', b'\x00')
time.sleep(10)
print("None")
write_binary(driver_path, 'matrix_effect_none', b'\x00')
time.sleep(5)
print("Breathing random")
write_binary(driver_path, 'matrix_effect_breath', b'\x00')
time.sleep(10)
print("Breathing red")
write_binary(driver_path, 'matrix_effect_breath', b'\xFF\x00\x00')
time.sleep(10)
print("Breathing blue-green")
write_binary(driver_path, 'matrix_effect_breath',
b'\x00\xFF\x00\x00\x00\xFF')
time.sleep(10)
if not args.skip_custom:
# row, start_col, end_col
payload_all = b'\x00\x00\x00'
# add 1 column of LEDs (end_col - start_col + 1 == 1), i.e. a single LED
payload_all += random.choice(COLOURS)
payload_white = b'\x00\x00\x00'
payload_white += b'\xFF\xFF\xFF'
print("Custom LED matrix colours test")
print("Press enter to begin.")
input()
write_binary(driver_path, 'matrix_custom_frame', payload_all)
write_binary(driver_path, 'matrix_effect_custom', b'\x00')
print("Custom LED matrix partial colours test")
print("Set LED to white. Press enter to begin.")
input()
write_binary(driver_path, 'matrix_custom_frame', payload_white)
write_binary(driver_path, 'matrix_effect_custom', b'\x00')
time.sleep(0.5)
print("Finished")
if not found_chroma:
print("No Goliathus found") |
299,877 | add coloring to emit ansi | import logging
import sys
from collections.abc import Callable
from typing import Optional
from yt.utilities.configure import YTConfig, configuration_callbacks
_yt_sh: Optional[logging.StreamHandler] = None
_original_emitter: Optional[Callable[[logging.LogRecord], None]] = None
def set_log_level(level):
"""
Select which minimal logging level should be displayed.
Parameters
----------
level: int or str
Possible values by increasing level:
0 or "notset"
1 or "all"
10 or "debug"
20 or "info"
30 or "warning"
40 or "error"
50 or "critical"
"""
# this is a user-facing interface to avoid importing from yt.utilities in user code.
if isinstance(level, str):
level = level.upper()
if level == "ALL": # non-standard alias
level = 1
ytLogger.setLevel(level)
ytLogger.debug("Set log level to %s", level)
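# Example usage: set_log_level("error") (equivalently set_log_level(40)) hides
# warnings and below, while set_log_level("debug") shows everything from DEBUG up.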
ytLogger = logging.getLogger("yt")
class DuplicateFilter(logging.Filter):
"""A filter that removes duplicated successive log entries."""
# source
# https://stackoverflow.com/questions/44691558/suppress-multiple-messages-with-same-content-in-python-logging-module-aka-log-co
def filter(self, record):
current_log = (record.module, record.levelno, record.msg, record.args)
if current_log != getattr(self, "last_log", None):
self.last_log = current_log
return True
return False
ytLogger.addFilter(DuplicateFilter())
class DeprecatedFieldFilter(logging.Filter):
"""A filter that suppresses repeated logging of deprecated field warnings"""
def __init__(self, name=""):
self.logged_fields = []
super().__init__(name=name)
def filter(self, record):
if not record.msg.startswith("The Derived Field"):
return True
field = record.args[0]
if field in self.logged_fields:
return False
self.logged_fields.append(field)
return True
ytLogger.addFilter(DeprecatedFieldFilter())
# This next bit is grabbed from:
# http://stackoverflow.com/questions/384076/how-can-i-make-the-python-logging-output-to-be-colored
def METHOD_NAME(fn):
# wrap the handler's emit function so levelnames are wrapped in ANSI colour codes
def new(*args):
levelno = args[0].levelno
if levelno >= 50:
color = "\x1b[31m" # red
elif levelno >= 40:
color = "\x1b[31m" # red
elif levelno >= 30:
color = "\x1b[33m" # yellow
elif levelno >= 20:
color = "\x1b[32m" # green
elif levelno >= 10:
color = "\x1b[35m" # pink
else:
color = "\x1b[0m" # normal
ln = color + args[0].levelname + "\x1b[0m"
args[0].levelname = ln
return fn(*args)
return new
ufstring = "%(name)-3s: [%(levelname)-9s] %(asctime)s %(message)s"
cfstring = "%(name)-3s: [%(levelname)-18s] %(asctime)s %(message)s"
def colorize_logging():
f = logging.Formatter(cfstring)
ytLogger.handlers[0].setFormatter(f)
ytLogger.handlers[0].emit = METHOD_NAME(ytLogger.handlers[0].emit)
def uncolorize_logging():
global _original_emitter, _yt_sh
if None not in (_original_emitter, _yt_sh):
f = logging.Formatter(ufstring)
ytLogger.handlers[0].setFormatter(f)
_yt_sh.emit = _original_emitter
def disable_stream_logging():
if len(ytLogger.handlers) > 0:
ytLogger.removeHandler(ytLogger.handlers[0])
h = logging.NullHandler()
ytLogger.addHandler(h)
def _runtime_configuration(ytcfg: YTConfig) -> None:
# only run this at the end of yt.__init__, after yt.config.ytcfg was instantiated
global _original_emitter, _yt_sh
if ytcfg.get("yt", "stdout_stream_logging"):
stream = sys.stdout
else:
stream = sys.stderr
_level = min(max(ytcfg.get("yt", "log_level"), 0), 50)
if ytcfg.get("yt", "suppress_stream_logging"):
disable_stream_logging()
else:
_yt_sh = logging.StreamHandler(stream=stream)
# create formatter and add it to the handlers
formatter = logging.Formatter(ufstring)
_yt_sh.setFormatter(formatter)
# add the handler to the logger
ytLogger.addHandler(_yt_sh)
ytLogger.setLevel(_level)
ytLogger.propagate = False
_original_emitter = _yt_sh.emit
if ytcfg.get("yt", "colored_logs"):
colorize_logging()
configuration_callbacks.append(_runtime_configuration) |
299,878 | test eq | from typing import Any
from pytest import mark, param
from omegaconf import AnyNode, DictConfig, ListConfig, OmegaConf
from tests import Group, User
@mark.parametrize(
"i1,i2",
[
# === LISTS ===
# empty list
param([], [], id="empty"),
# simple list
param(["a", 12, "15"], ["a", 12, "15"], id="simple_list"),
# raw vs any
([1, 2, 12], [1, 2, AnyNode(12)]),
# nested empty dict
([12, {}], [12, {}]),
# nested dict
([12, {"c": 10}], [12, {"c": 10}]),
# nested list
([1, 2, 3, [10, 20, 30]], [1, 2, 3, [10, 20, 30]]),
# nested list with any
([1, 2, 3, [1, 2, AnyNode(3)]], [1, 2, 3, [1, 2, AnyNode(3)]]),
# === DICTS ===
# empty
({}, {}),
# simple
({"a": 12}, {"a": 12}),
# any vs raw
({"a": 12}, {"a": AnyNode(12)}),
# nested dict empty
(dict(a=12, b=dict()), dict(a=12, b=dict())),
# nested dict
(dict(a=12, b=dict(c=10)), dict(a=12, b=dict(c=10))),
# nested list
(dict(a=12, b=[1, 2, 3]), dict(a=12, b=[1, 2, 3])),
# nested list with any
(dict(a=12, b=[1, 2, AnyNode(3)]), dict(a=12, b=[1, 2, AnyNode(3)])),
# In Python 3.6+ insertion order changes iteration order. This ensures that equality is preserved.
(dict(a=1, b=2, c=3, d=4, e=5), dict(e=5, b=2, c=3, d=4, a=1)),
(DictConfig(content=None), DictConfig(content=None)),
param({"a": [1, 2]}, {"a": [1, 2]}, id="list_in_dict"),
# With interpolations
([10, "${0}"], [10, 10]),
(dict(a=12, b="${a}"), dict(a=12, b=12)),
# With missing interpolation
param([10, "${0}"], [10, 10], id="list_simple_interpolation"),
param({"a": "${ref_error}"}, {"a": "${ref_error}"}, id="dict==dict,ref_error"),
param({"a": "???"}, {"a": "???"}, id="dict==dict,missing"),
param(User, User, id="User==User"),
param({"name": "poo", "age": 7}, User(name="poo", age=7), id="dict==User"),
param(Group, Group, id="Group==Group"),
param({"group": {"admin": None}}, {"group": Group}, id="dict==Group"),
param(
{"i1": "${n1}", "n1": {"a": 10}},
{"i1": "${n1}", "n1": {"a": 10}},
id="node_interpolation",
),
# Inter containers
param(
{"foo": DictConfig(content="${bar}"), "bar": 10},
{"foo": 10, "bar": 10},
id="dictconfig_inter",
),
param(
{"foo": ListConfig(content="${bar}"), "bar": 10},
{"foo": 10, "bar": 10},
id="listconfig_inter",
),
# None containers
param({"foo": DictConfig(content=None)}, {"foo": None}, id="dictconfig_none"),
param({"foo": ListConfig(content=None)}, {"foo": None}, id="listconfig_none"),
# Missing containers
param(DictConfig("???"), DictConfig("???"), id="missing_dictconfig"),
param(ListConfig("???"), ListConfig("???"), id="missing_listconfig"),
param(
{"foo": DictConfig("???")}, {"foo": "???"}, id="nested_missing_dictconfig"
),
param(
{"foo": ListConfig("???")}, {"foo": "???"}, id="nested_missing_listconfig"
),
],
)
def METHOD_NAME(i1: Any, i2: Any) -> None:
c1 = OmegaConf.create(i1)
c2 = OmegaConf.create(i2)
def eq(a: Any, b: Any) -> None:
assert a == b
assert b == a
assert not a != b
assert not b != a
eq(c1, c2)
eq(c1, i1)
eq(c2, i2)
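# Minimal extra sketch (not from the original suite): interpolations resolve
# before equality is evaluated, mirroring the parametrized cases above.
def test_eq_resolves_interpolation_sketch() -> None:
    cfg = OmegaConf.create({"a": 12, "b": "${a}"})
    assert cfg == {"a": 12, "b": 12}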
@mark.parametrize(
"cfg,other",
[
param(DictConfig("???"), "???", id="missing_dictconfig"),
param(ListConfig("???"), "???", id="missing_listconfig"),
],
)
def test_missing_container_string_eq(cfg: Any, other: Any) -> None:
assert cfg == other
assert other == cfg
assert not (cfg != other)
assert not (other != cfg)
@mark.parametrize(
"input1, input2",
[
# Dicts
param({}, {"a": 10}, id="empty_dict_neq_dict"),
param({}, [], id="empty_dict_vs_list"),
param({}, None, id="dict_neq_none"),
param({"foo": None}, {"foo": "bar"}, id="dict_none_neq_dict_not_none"),
param({"a": 12}, {"a": 13}, id="simple_dict_neq"),
param({"a": 0}, {"b": 0}, id="different_key_same_value"),
param(dict(a=12), dict(a=AnyNode(13))),
param(dict(a=12, b=dict()), dict(a=13, b=dict())),
param(dict(a=12, b=dict(c=10)), dict(a=13, b=dict(c=10))),
param(dict(a=12, b=[1, 2, 3]), dict(a=12, b=[10, 2, 3])),
param(dict(a=12, b=[1, 2, AnyNode(3)]), dict(a=12, b=[1, 2, AnyNode(30)])),
# Lists
param([], [10], id="list:empty_vs_full"),
param([10], [11], id="list:different_value"),
([12], [AnyNode(13)]),
([12, dict()], [13, dict()]),
([12, dict(c=10)], [13, dict(c=10)]),
([12, [1, 2, 3]], [12, [10, 2, 3]]),
([12, [1, 2, AnyNode(3)]], [12, [1, 2, AnyNode(30)]]),
(dict(a="${foo1}"), dict(a="${foo2}")),
param(
{"i1": "${n1}", "n1": {"a": 10}},
{"i1": "${n1}", "n1": {"a": 20}},
id="node_interpolation",
),
],
)
def test_not_eq(input1: Any, input2: Any) -> None:
c1 = OmegaConf.create(input1)
c2 = OmegaConf.create(input2)
def neq(a: Any, b: Any) -> None:
assert a != b
assert b != a
assert not a == b
assert not b == a
neq(c1, c2)
neq(c2, c1)
# ---
def test_config_eq_mismatch_types() -> None:
c1 = OmegaConf.create({})
c2 = OmegaConf.create([])
assert c1 != c2
def test_dict_not_eq_with_another_class() -> None:
assert OmegaConf.create({}) != "string"
assert OmegaConf.create([]) != "string" |
299,879 | patch | import os
import molten
from ddtrace.internal.constants import COMPONENT
from ddtrace.internal.schema.span_attribute_schema import SpanDirection
from ddtrace.vendor import wrapt
from ddtrace.vendor.wrapt import wrap_function_wrapper as _w
from .. import trace_utils
from ... import Pin
from ... import config
from ...constants import ANALYTICS_SAMPLE_RATE_KEY
from ...constants import SPAN_KIND
from ...constants import SPAN_MEASURED_KEY
from ...ext import SpanKind
from ...ext import SpanTypes
from ...internal.compat import urlencode
from ...internal.schema import schematize_service_name
from ...internal.schema import schematize_url_operation
from ...internal.utils.formats import asbool
from ...internal.utils.importlib import func_name
from ...internal.utils.version import parse_version
from ..trace_utils import unwrap as _u
from .wrappers import MOLTEN_ROUTE
from .wrappers import WrapperComponent
from .wrappers import WrapperMiddleware
from .wrappers import WrapperRenderer
from .wrappers import WrapperRouter
MOLTEN_VERSION = parse_version(molten.__version__)
# Configure default configuration
config._add(
"molten",
dict(
_default_service=schematize_service_name("molten"),
distributed_tracing=asbool(os.getenv("DD_MOLTEN_DISTRIBUTED_TRACING", default=True)),
),
)
def get_version():
# type: () -> str
return getattr(molten, "__version__", "")
def METHOD_NAME():
"""Patch the instrumented methods"""
if getattr(molten, "_datadog_patch", False):
return
molten._datadog_patch = True
pin = Pin()
# add pin to module since many classes use __slots__
pin.onto(molten)
_w(molten.BaseApp, "__init__", patch_app_init)
_w(molten.App, "__call__", patch_app_call)
def unpatch():
"""Remove instrumentation"""
if getattr(molten, "_datadog_patch", False):
molten._datadog_patch = False
# remove pin
pin = Pin.get_from(molten)
if pin:
pin.remove_from(molten)
_u(molten.BaseApp, "__init__")
_u(molten.App, "__call__")
def patch_app_call(wrapped, instance, args, kwargs):
"""Patch wsgi interface for app"""
pin = Pin.get_from(molten)
if not pin or not pin.enabled():
return wrapped(*args, **kwargs)
# DEV: This is safe because this is the args for a WSGI handler
# https://www.python.org/dev/peps/pep-3333/
environ, start_response = args
request = molten.http.Request.from_environ(environ)
resource = func_name(wrapped)
# request.headers is type Iterable[Tuple[str, str]]
trace_utils.activate_distributed_headers(
pin.tracer, int_config=config.molten, request_headers=dict(request.headers)
)
with pin.tracer.trace(
schematize_url_operation("molten.request", protocol="http", direction=SpanDirection.INBOUND),
service=trace_utils.int_service(pin, config.molten),
resource=resource,
span_type=SpanTypes.WEB,
) as span:
span.set_tag_str(COMPONENT, config.molten.integration_name)
# set span.kind tag equal to type of operation being performed
span.set_tag_str(SPAN_KIND, SpanKind.SERVER)
span.set_tag(SPAN_MEASURED_KEY)
# set analytics sample rate with global config enabled
span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, config.molten.get_analytics_sample_rate(use_global_config=True))
@wrapt.function_wrapper
def _w_start_response(wrapped, instance, args, kwargs):
"""Patch respond handling to set metadata"""
pin = Pin.get_from(molten)
if not pin or not pin.enabled():
return wrapped(*args, **kwargs)
status, headers, exc_info = args
code, _, _ = status.partition(" ")
try:
code = int(code)
except ValueError:
pass
if not span.get_tag(MOLTEN_ROUTE):
# if the route never resolved, update the root resource
span.resource = u"{} {}".format(request.method, code)
trace_utils.set_http_meta(span, config.molten, status_code=code)
return wrapped(*args, **kwargs)
# patching for extracting response code
start_response = _w_start_response(start_response)
url = "%s://%s:%s%s" % (
request.scheme,
request.host,
request.port,
request.path,
)
query = urlencode(dict(request.params))
trace_utils.set_http_meta(
span, config.molten, method=request.method, url=url, query=query, request_headers=request.headers
)
span.set_tag_str("molten.version", molten.__version__)
return wrapped(environ, start_response, **kwargs)
def patch_app_init(wrapped, instance, args, kwargs):
"""Patch app initialization of middleware, components and renderers"""
# allow instance to be initialized before wrapping them
wrapped(*args, **kwargs)
# add Pin to instance
pin = Pin.get_from(molten)
if not pin or not pin.enabled():
return
# Wrappers here allow us to trace objects without altering class or instance
# attributes, which presents a problem when classes in molten use
# ``__slots__``
instance.router = WrapperRouter(instance.router)
# wrap middleware functions/callables
instance.middleware = [WrapperMiddleware(mw) for mw in instance.middleware]
# wrap components objects within injector
# NOTE: the app instance also contains a list of components but it does not
# appear to be used for anything passing along to the dependency injector
instance.injector.components = [WrapperComponent(c) for c in instance.injector.components]
# wrap renderer objects
instance.renderers = [WrapperRenderer(r) for r in instance.renderers] |
299,880 | sort alphabetical | #!/usr/bin/env python3
# This file is part of Firejail project
# Copyright (C) 2014-2023 Firejail Authors
# License GPL v2
# Requirements:
# python >= 3.6
from os import path
from sys import argv, exit as sys_exit, stderr
__doc__ = f"""\
Sort the arguments of commands in profiles.
Usage: {path.basename(argv[0])} [/path/to/profile ...]
The following commands are supported:
private-bin, private-etc, private-lib, caps.drop, caps.keep, seccomp.drop,
seccomp.drop, protocol
Note that this is only applicable to commands that support multiple arguments.
Keep in mind that this will overwrite your profile(s).
Examples:
$ {argv[0]} MyAwesomeProfile.profile
$ {argv[0]} new_profile.profile second_new_profile.profile
$ {argv[0]} ~/.config/firejail/*.{{profile,inc,local}}
$ sudo {argv[0]} /etc/firejail/*.{{profile,inc,local}}
Exit Codes:
0: Success: No profiles needed fixing.
1: Error: One or more profiles could not be processed correctly.
2: Error: Missing arguments.
101: Info: One or more profiles were fixed.
"""
def METHOD_NAME(original_items):
items = original_items.split(",")
items.sort(key=str.casefold)
return ",".join(items)
def sort_protocol(original_protocols):
"""
Sort the given protocols into the following order:
unix,inet,inet6,netlink,packet,bluetooth
"""
# shortcut for common protocol lines
if original_protocols in ("unix", "unix,inet,inet6"):
return original_protocols
fixed_protocols = ""
for protocol in ("unix", "inet", "inet6", "netlink", "packet", "bluetooth"):
for prefix in ("", "-", "+", "="):
if f",{prefix}{protocol}," in f",{original_protocols},":
fixed_protocols += f"{prefix}{protocol},"
return fixed_protocols[:-1]
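# Illustrative note (not part of the original script): sort_protocol keeps any
# "-", "+" or "=" prefix attached to its protocol while emitting entries in the
# fixed order above, so "packet,unix,-inet" would be expected to come back as
# "unix,-inet,packet".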
def fix_profile(filename):
with open(filename, "r+") as profile:
lines = profile.read().split("\n")
was_fixed = False
fixed_profile = []
for lineno, line in enumerate(lines, 1):
if line[:12] in ("private-bin ", "private-etc ", "private-lib "):
fixed_line = f"{line[:12]}{METHOD_NAME(line[12:])}"
elif line[:13] in ("seccomp.drop ", "seccomp.keep "):
fixed_line = f"{line[:13]}{METHOD_NAME(line[13:])}"
elif line[:10] in ("caps.drop ", "caps.keep "):
fixed_line = f"{line[:10]}{METHOD_NAME(line[10:])}"
elif line[:8] == "protocol":
fixed_line = f"protocol {sort_protocol(line[9:])}"
elif line[:8] == "seccomp ":
fixed_line = f"{line[:8]}{METHOD_NAME(line[8:])}"
else:
fixed_line = line
if fixed_line != line:
was_fixed = True
print(
f"{filename}:{lineno}:-{line}\n"
f"{filename}:{lineno}:+{fixed_line}"
)
fixed_profile.append(fixed_line)
if was_fixed:
profile.seek(0)
profile.truncate()
profile.write("\n".join(fixed_profile))
profile.flush()
print(f"[ Fixed ] {filename}")
return 101
return 0
def main(args):
if len(args) < 1:
print(__doc__, file=stderr)
return 2
print(f"sort.py: checking {len(args)} profile(s)...")
exit_code = 0
for filename in args:
try:
if exit_code not in (1, 101):
exit_code = fix_profile(filename)
else:
fix_profile(filename)
except FileNotFoundError as err:
print(f"[ Error ] {err}", file=stderr)
exit_code = 1
except PermissionError as err:
print(f"[ Error ] {err}", file=stderr)
exit_code = 1
except Exception as err:
print(
f"[ Error ] An error occurred while processing '{filename}': {err}",
file=stderr,
)
exit_code = 1
return exit_code
if __name__ == "__main__":
sys_exit(main(argv[1:])) |
299,881 | test should create a valid python3 expression | # coding=utf-8
import pytest
from mock import MagicMock
import pandas as pd
from sparkmagic.livyclientlib.exceptions import BadUserDataException
from sparkmagic.livyclientlib.command import Command
import sparkmagic.utils.constants as constants
from sparkmagic.livyclientlib.sendpandasdftosparkcommand import (
SendPandasDfToSparkCommand,
)
def test_send_to_scala():
input_variable_name = "input"
input_variable_value = pd.DataFrame({"A": [1], "B": [2]})
output_variable_name = "output"
maxrows = 1
sparkcommand = SendPandasDfToSparkCommand(
input_variable_name, input_variable_value, output_variable_name, maxrows
)
sparkcommand._scala_command = MagicMock(return_value=MagicMock())
sparkcommand.to_command(
constants.SESSION_KIND_SPARK,
input_variable_name,
input_variable_value,
output_variable_name,
)
sparkcommand._scala_command.assert_called_with(
input_variable_name, input_variable_value, output_variable_name
)
def test_send_to_r():
input_variable_name = "input"
input_variable_value = pd.DataFrame({"A": [1], "B": [2]})
output_variable_name = "output"
maxrows = 1
sparkcommand = SendPandasDfToSparkCommand(
input_variable_name, input_variable_value, output_variable_name, maxrows
)
sparkcommand._r_command = MagicMock(return_value=MagicMock())
sparkcommand.to_command(
constants.SESSION_KIND_SPARKR,
input_variable_name,
input_variable_value,
output_variable_name,
)
sparkcommand._r_command.assert_called_with(
input_variable_name, input_variable_value, output_variable_name
)
def test_send_to_python():
input_variable_name = "input"
input_variable_value = pd.DataFrame({"A": [1], "B": [2]})
output_variable_name = "output"
maxrows = 1
sparkcommand = SendPandasDfToSparkCommand(
input_variable_name, input_variable_value, output_variable_name, maxrows
)
sparkcommand._pyspark_command = MagicMock(return_value=MagicMock())
sparkcommand.to_command(
constants.SESSION_KIND_PYSPARK,
input_variable_name,
input_variable_value,
output_variable_name,
)
sparkcommand._pyspark_command.assert_called_with(
input_variable_name, input_variable_value, output_variable_name
)
def test_should_create_a_valid_scala_expression():
input_variable_name = "input"
input_variable_value = pd.DataFrame({"A": [1], "B": [2]})
output_variable_name = "output"
pandas_df_jsonized = """[{"A":1,"B":2}]"""
expected_scala_code = '''
val rdd_json_array = spark.sparkContext.makeRDD("""{}""" :: Nil)
val {} = spark.read.json(rdd_json_array)'''.format(
pandas_df_jsonized, output_variable_name
)
sparkcommand = SendPandasDfToSparkCommand(
input_variable_name, input_variable_value, output_variable_name, 1
)
assert sparkcommand._scala_command(
input_variable_name, input_variable_value, output_variable_name
) == Command(expected_scala_code)
def test_should_create_a_valid_r_expression():
input_variable_name = "input"
input_variable_value = pd.DataFrame({"A": [1], "B": [2]})
output_variable_name = "output"
pandas_df_jsonized = """[{"A":1,"B":2}]"""
expected_r_code = """
fileConn<-file("temporary_pandas_df_sparkmagics.txt")
writeLines('{}', fileConn)
close(fileConn)
{} <- read.json("temporary_pandas_df_sparkmagics.txt")
{}.persist()
file.remove("temporary_pandas_df_sparkmagics.txt")""".format(
pandas_df_jsonized, output_variable_name, output_variable_name
)
sparkcommand = SendPandasDfToSparkCommand(
input_variable_name, input_variable_value, output_variable_name, 1
)
assert sparkcommand._r_command(
input_variable_name, input_variable_value, output_variable_name
) == Command(expected_r_code)
def METHOD_NAME():
input_variable_name = "input"
input_variable_value = pd.DataFrame({"A": [1], "B": [2]})
output_variable_name = "output"
pandas_df_jsonized = """[{"A":1,"B":2}]"""
expected_python3_code = SendPandasDfToSparkCommand._python_decode
expected_python3_code += """
json_array = json_loads_byteified('{}')
rdd_json_array = spark.sparkContext.parallelize(json_array)
{} = spark.read.json(rdd_json_array)""".format(
pandas_df_jsonized, output_variable_name
)
sparkcommand = SendPandasDfToSparkCommand(
input_variable_name, input_variable_value, output_variable_name, 1
)
assert sparkcommand._pyspark_command(
input_variable_name, input_variable_value, output_variable_name
) == Command(expected_python3_code)
def test_should_create_a_valid_python2_expression():
input_variable_name = "input"
input_variable_value = pd.DataFrame({"A": [1], "B": [2]})
output_variable_name = "output"
pandas_df_jsonized = """[{"A":1,"B":2}]"""
expected_python2_code = SendPandasDfToSparkCommand._python_decode
expected_python2_code += """
json_array = json_loads_byteified('{}')
rdd_json_array = spark.sparkContext.parallelize(json_array)
{} = spark.read.json(rdd_json_array)""".format(
pandas_df_jsonized, output_variable_name
)
sparkcommand = SendPandasDfToSparkCommand(
input_variable_name, input_variable_value, output_variable_name, 1
)
assert sparkcommand._pyspark_command(
input_variable_name, input_variable_value, output_variable_name
) == Command(expected_python2_code)
def test_should_properly_limit_pandas_dataframe():
input_variable_name = "input"
max_rows = 1
input_variable_value = pd.DataFrame({"A": [0, 1, 2, 3, 4], "B": [5, 6, 7, 8, 9]})
output_variable_name = "output"
pandas_df_jsonized = (
"""[{"A":0,"B":5}]""" # notice we expect json to have dropped all but one row
)
expected_scala_code = '''
val rdd_json_array = spark.sparkContext.makeRDD("""{}""" :: Nil)
val {} = spark.read.json(rdd_json_array)'''.format(
pandas_df_jsonized, output_variable_name
)
sparkcommand = SendPandasDfToSparkCommand(
input_variable_name, input_variable_value, output_variable_name, max_rows
)
assert sparkcommand._scala_command(
input_variable_name, input_variable_value, output_variable_name
) == Command(expected_scala_code)
def test_should_raise_when_input_is_not_pandas_df():
with pytest.raises(BadUserDataException):
input_variable_name = "input"
input_variable_value = "not a pandas dataframe"
output_variable_name = "output"
sparkcommand = SendPandasDfToSparkCommand(
input_variable_name, input_variable_value, output_variable_name, 1
)
sparkcommand.to_command(
"spark",
input_variable_name,
input_variable_value,
output_variable_name,
) |
299,882 | test slack penalization | import unittest
from docplex.mp.model import Model
import networkx as nx
from openqaoa.problems import QUBO, MaximumCut, FromDocplex2IsingModel
class TestDocplex2IsingClass(unittest.TestCase):
"""
Test the converter from docplex models
"""
def test_qubo(self):
"""
Test that a QUBO instance is generated by the FromDocplex2IsingModel converter.
"""
# Creating a basic docplex model
mdl = Model("Test") # Docplex model
num_z = 5 # Number of variables
z = mdl.binary_var_list(num_z, name="Z") # docplex variables
objective = mdl.sum(z) - 2 * z[0] + z[3] * z[4] + 5 # objective function
mdl.minimize(objective) # Optimization
ising_problem = FromDocplex2IsingModel(
mdl
).ising_model # Ising model of the Docplex model
self.assertIsInstance(ising_problem, QUBO)
def METHOD_NAME(self):
"""
Test that equality and inequality constraints are encoded in the QUBO
model, using the slack-variables approach for the inequality constraints.
"""
weights = [
18.25,
-4.0,
-8.0,
-4.0,
-6.0,
-12.0,
-6.0,
4.0,
2.0,
4.0,
2.0,
2.0,
4.0,
4.0,
2.0,
2.25,
1.25,
-2.0,
-4.0,
-2.0,
-2.0,
-2.0,
]
# Creating a basic docplex model
mdl = Model("Test inequal") # Docplex model
num_z = 2 # Number of variables
z = mdl.binary_var_list(num_z, name="Z") # docplex variables
objective = mdl.sum(z) - 2 * z[0] + z[1] * z[0] + 5 # objective function
# Adding constraints
mdl.add_constraint(mdl.sum(z[i] for i in range(num_z)) == 1)
mdl.add_constraint(2 * z[0] + 3 * z[1] >= 1)
mdl.add_constraint(2 * z[1] + z[0] <= 2)
mdl.minimize(objective) # Optimization
ising_problem = FromDocplex2IsingModel(
mdl
).ising_model # Ising model of the Docplex model
self.assertIsInstance(ising_problem, QUBO)
self.assertEqual(ising_problem.weights, weights)
def test_unbalanced_penalizations(self):
"""
Test that equality and inequality constraints are encoded in the QUBO
model using the unbalanced penalization method.
"""
weights = [4.25, -0.95, -2.45]
# Creating a basic docplex model
mdl = Model("Test inequal") # Docplex model
num_z = 2 # Number of variables
z = mdl.binary_var_list(num_z, name="Z") # docplex variables
objective = mdl.sum(z) - 2 * z[0] + z[1] * z[0] + 5 # objective function
# Adding constraints
mdl.add_constraint(mdl.sum(z[i] for i in range(num_z)) == 1)
mdl.add_constraint(2 * z[0] + 3 * z[1] >= 1)
mdl.add_constraint(2 * z[1] + z[0] <= 2)
mdl.minimize(objective) # Optimization
ising_problem = FromDocplex2IsingModel(
mdl, unbalanced_const=True
).ising_model # Ising model of the Docplex model
self.assertIsInstance(ising_problem, QUBO)
for weight_1, weight_2 in zip(ising_problem.weights, weights):
self.assertAlmostEqual(weight_1, weight_2)
def test_model_maxcut(self):
"""
Test that the MaxCut application of OpenQAOA gives the same result as the
Docplex translation model.
"""
# Graph representing the maxcut problem
n_nodes = 5 # number of nodes
G = nx.Graph()
G.add_nodes_from(range(n_nodes))
G.add_edges_from([[0, 1], [0, 2], [1, 2], [4, 3], [3, 2], [4, 0], [2, 4]])
# Docplex model
mdl = Model(name="Max-cut")
x = mdl.binary_var_list(n_nodes, name="x")
for w, v in G.edges:
G.edges[w, v].setdefault("weight", 1)
objective = mdl.sum(
G.edges[i, j]["weight"] * x[i] * (1 - x[j])
+ G.edges[i, j]["weight"] * x[j] * (1 - x[i])
for i, j in G.edges
)
mdl.maximize(objective)
# Translating the problem to OQ ising Model
ModelOQ = FromDocplex2IsingModel(mdl)
Ising_model_OQ = ModelOQ.ising_model.asdict()
# Using the predefine function of this problem
IsingModelDirect = MaximumCut(G).qubo.asdict()
# Compare both results; in this specific case MaxCut from OpenQAOA gives
# coefficients that are twice those of the Docplex-to-Ising translation
for nn, term in enumerate(IsingModelDirect["terms"]):
idx_OQ = Ising_model_OQ["terms"].index(term)
self.assertAlmostEqual(
2 * Ising_model_OQ["weights"][idx_OQ], IsingModelDirect["weights"][nn]
)
if __name__ == "__main__":
unittest.main() |
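# Illustrative conversion pattern exercised by the tests above (hypothetical
# model; the FromDocplex2IsingModel call mirrors the usage in this file):
#
#   mdl = Model("example")
#   x = mdl.binary_var_list(2, name="x")
#   mdl.minimize(x[0] + x[1] + 2 * x[0] * x[1])
#   qubo = FromDocplex2IsingModel(mdl).ising_model  # openqaoa QUBO instance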
299,883 | resolve locale | import graphene
from graphene_django import DjangoObjectType
from graphene_django.debug import DjangoDebug
from pontoon.api.util import get_fields
from pontoon.base.models import (
Locale as LocaleModel,
Project as ProjectModel,
ProjectLocale as ProjectLocaleModel,
)
from pontoon.tags.models import Tag as TagModel
class Stats:
missing_strings = graphene.Int()
complete = graphene.Boolean()
class Tag(DjangoObjectType):
class Meta:
convert_choices_to_enum = False
model = TagModel
fields = (
"slug",
"name",
"priority",
)
class ProjectLocale(DjangoObjectType, Stats):
class Meta:
model = ProjectLocaleModel
fields = (
"project",
"locale",
"total_strings",
"approved_strings",
"pretranslated_strings",
"strings_with_errors",
"strings_with_warnings",
"unreviewed_strings",
)
class Project(DjangoObjectType, Stats):
class Meta:
convert_choices_to_enum = False
model = ProjectModel
fields = (
"name",
"slug",
"disabled",
"sync_disabled",
"pretranslation_enabled",
"visibility",
"system_project",
"info",
"deadline",
"priority",
"contact",
"total_strings",
"approved_strings",
"pretranslated_strings",
"strings_with_errors",
"strings_with_warnings",
"unreviewed_strings",
)
localizations = graphene.List(ProjectLocale)
tags = graphene.List(Tag)
def resolve_localizations(obj, info):
return obj.project_locale.all()
def resolve_tags(obj, info):
return obj.tag_set.all()
class Locale(DjangoObjectType, Stats):
class Meta:
model = LocaleModel
fields = (
"name",
"code",
"direction",
"cldr_plurals",
"plural_rule",
"script",
"population",
"team_description",
"total_strings",
"approved_strings",
"pretranslated_strings",
"strings_with_errors",
"strings_with_warnings",
"unreviewed_strings",
"google_translate_code",
"ms_translator_code",
"systran_translate_code",
"ms_terminology_code",
)
localizations = graphene.List(
ProjectLocale,
include_disabled=graphene.Boolean(False),
include_system=graphene.Boolean(False),
)
def resolve_localizations(obj, info, include_disabled, include_system):
projects = obj.project_locale.visible_for(info.context.user)
records = projects.filter(
project__disabled=False, project__system_project=False
)
if include_disabled:
records |= projects.filter(project__disabled=True)
if include_system:
records |= projects.filter(project__system_project=True)
return records.distinct()
class Query(graphene.ObjectType):
debug = graphene.Field(DjangoDebug, name="__debug")
# include_disabled=True will return both active and disabled projects.
# include_system=True will return both system and non-system projects.
projects = graphene.List(
Project,
include_disabled=graphene.Boolean(False),
include_system=graphene.Boolean(False),
)
project = graphene.Field(Project, slug=graphene.String())
locales = graphene.List(Locale)
locale = graphene.Field(Locale, code=graphene.String())
def resolve_projects(obj, info, include_disabled, include_system):
fields = get_fields(info)
projects = ProjectModel.objects.visible_for(info.context.user)
records = projects.filter(disabled=False, system_project=False)
if include_disabled:
records |= projects.filter(disabled=True)
if include_system:
records |= projects.filter(system_project=True)
if "projects.localizations" in fields:
records = records.prefetch_related("project_locale__locale")
if "projects.localizations.locale.localizations" in fields:
raise Exception("Cyclic queries are forbidden")
return records.distinct()
def resolve_project(obj, info, slug):
qs = ProjectModel.objects.visible_for(info.context.user)
fields = get_fields(info)
if "project.localizations" in fields:
qs = qs.prefetch_related("project_locale__locale")
if "project.tags" in fields:
qs = qs.prefetch_related("tag_set")
if "project.localizations.locale.localizations" in fields:
raise Exception("Cyclic queries are forbidden")
return qs.get(slug=slug)
def resolve_locales(obj, info):
qs = LocaleModel.objects
fields = get_fields(info)
if "locales.localizations" in fields:
qs = qs.prefetch_related("project_locale__project")
if "locales.localizations.project.localizations" in fields:
raise Exception("Cyclic queries are forbidden")
return qs.all()
def METHOD_NAME(obj, info, code):
qs = LocaleModel.objects
fields = get_fields(info)
if "locale.localizations" in fields:
qs = qs.prefetch_related("project_locale__project")
if "locale.localizations.project.localizations" in fields:
raise Exception("Cyclic queries are forbidden")
return qs.get(code=code)
schema = graphene.Schema(query=Query) |
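# Illustrative use of the schema (hypothetical query; exact field availability
# depends on the Pontoon deployment):
#
#   result = schema.execute(
#       "{ projects(includeDisabled: true) { name slug localizations { locale { code } } } }"
#   )
#   # result.data["projects"] -> list of project dicts with nested localizations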
299,884 | create train triples | import os
import pickle
import random
import pandas as pd
import torch
from torch.utils.data import DataLoader
from Marie.Util.NHopExtractor import HopExtractor
from Marie.Util.location import DATA_DIR
class LinkPredictionDataset(torch.utils.data.Dataset):
def __init__(self, df, dataset_path=None, dataset_name=None, neg_rate=20, mode="train"):
self.df = df
if dataset_path is None:
e2i_path = open(os.path.join(DATA_DIR, f'entity2idx.pkl'), 'rb')
r2i_path = open(os.path.join(DATA_DIR, f'relation2idx.pkl'), 'rb')
else:
e2i_path = open(os.path.join(DATA_DIR, f'{dataset_path}/entity2idx.pkl'), 'rb')
r2i_path = open(os.path.join(DATA_DIR, f'{dataset_path}/relation2idx.pkl'), 'rb')
self.entity2idx = pickle.load(e2i_path)
self.relation2idx = pickle.load(r2i_path)
self.ent_num = len(self.entity2idx.keys())
self.rel_num = len(self.relation2idx.keys())
self.all_t_idx_list = range(0, self.ent_num)
self.extractor = HopExtractor(dataset_dir=dataset_path, dataset_name=dataset_name)
self.neg_rate = neg_rate
self.mode = mode
if self.mode == "train":
self.all_triples = self.METHOD_NAME()
print(f"total number of triples: {len(self.all_triples)}")
else:
self.h_r_t = pickle.load(open(os.path.join(dataset_path, "h_r_t.pkl"), "rb"))
# self.df = self.df.sample(n=1000)
def create_fake_triples(self, h_idx, r_idx, t_idx):
neg_triples_hr_t = []
neg_triples_h_rt = []
neg_triples_ht_r = []
neg_triples_h_t_r = []
counter = 0
while len(neg_triples_h_t_r) <= self.neg_rate:
counter += 1
# print(f"neg sample number {counter}")
random_head_idx = random.randrange(0, self.ent_num)
random_tail_idx = random.randrange(0, self.ent_num)
triple_str = f"{random_head_idx}_{r_idx}_{random_tail_idx}"
if not self.extractor.check_triple_existence(triple_str=triple_str):
neg_triples_h_t_r.append((random_head_idx, r_idx, random_tail_idx))
while len(neg_triples_hr_t) <= self.neg_rate:
counter += 1
# print(f"neg sample number {counter}")
random_tail_idx = random.randrange(0, self.ent_num)
triple_str = f"{h_idx}_{r_idx}_{random_tail_idx}"
if not self.extractor.check_triple_existence(triple_str=triple_str):
neg_triples_hr_t.append((h_idx, r_idx, random_tail_idx))
while len(neg_triples_h_rt) <= self.neg_rate:
counter += 1
# print(f"neg sample number {counter}")
random_head_idx = random.randrange(0, self.ent_num)
triple_str = f"{random_head_idx}_{r_idx}_{t_idx}"
if not self.extractor.check_triple_existence(triple_str=triple_str):
neg_triples_h_rt.append((random_head_idx, r_idx, t_idx))
while len(neg_triples_ht_r) <= self.neg_rate:
counter += 1
# print(f"neg sample number {counter}")
random_rel_idx = random.randrange(0, self.rel_num)
triple_str = f"{h_idx}_{random_rel_idx}_{t_idx}"
if not self.extractor.check_triple_existence(triple_str=triple_str):
neg_triples_ht_r.append((h_idx, random_rel_idx, t_idx))
neg_triples = neg_triples_hr_t + neg_triples_h_rt + neg_triples_ht_r + neg_triples_h_t_r
return neg_triples
# def create_test_triples(self, idx, row):
# triples = []
# for idx, row in self.df.iterrows():
# print(f"{idx} out out {len(self.df)}")
# h_idx, r_idx, true_t_idx = self.entity2idx[row[0]], self.relation2idx[row[1]], self.entity2idx[row[2]]
# for tail in self.all_t_idx_list:
# triples.append((h_idx, r_idx, tail, true_t_idx))
# triples.append((h_idx, r_idx, true_t_idx, true_t_idx))
# return [h_idx, r_idx, true_t_idx]
def METHOD_NAME(self):
print("Creating all triples for training")
all_triples = []
for idx, row in self.df.iterrows():
print(f"{idx} out of {len(self.df)}")
h_idx, r_idx, t_idx = self.entity2idx[row[0]], self.relation2idx[row[1]], self.entity2idx[row[2]]
true_triple = (h_idx, r_idx, t_idx)
fake_triples = self.create_fake_triples(h_idx=h_idx, r_idx=r_idx, t_idx=t_idx)
for fake_triple in fake_triples:
all_triples.append((true_triple, fake_triple))
return all_triples
def __len__(self):
if self.mode == "test":
return len(self.df)
# return 1000
else:
return len(self.all_triples)
def __getitem__(self, item):
if self.mode == "test":
return self.h_r_t[item]
# return self.create_test_triples(idx=item, row=self.df.iloc[item])
else:
return self.all_triples[item]
if __name__ == "__main__":
dataset_dir = os.path.join(DATA_DIR, "CrossGraph", "fb15k")
df_train = pd.read_csv(os.path.join(dataset_dir, "fb15k-train.txt"), sep="\t", header=None)
df_train = df_train.sample(n=5)
df_valid = pd.read_csv(os.path.join(dataset_dir, "fb15k-valid.txt"), sep="\t", header=None)
df_test = pd.read_csv(os.path.join(dataset_dir, "fb15k-test.txt"), sep="\t", header=None)
# relations = list(df_train.loc[:, 1].values)
my_dataset = LinkPredictionDataset(df_train, dataset_path=dataset_dir, dataset_name="fb15k", neg_rate=20)
my_dataloader = DataLoader(my_dataset, shuffle=False, batch_size=20)
rel_num = my_dataset.rel_num
test_dataset = LinkPredictionDataset(df_test, dataset_path=dataset_dir, dataset_name="fb15k", neg_rate=rel_num,
mode="test")
test_dataloader = DataLoader(test_dataset, shuffle=False, batch_size=1)
for row in test_dataloader:
head, rel, true_tail = row
head = head.repeat(10)
rel = rel.repeat(10)
tail = torch.arange(0, 10).long()
print(head, rel, tail) |
299,885 | sort order | from django.db import models
from modelcluster.fields import ParentalKey
from common.models import MetadataPageMixin
from common.blocks import (
Heading1,
Heading2,
Heading3,
AlignedImageBlock,
AlignedEmbedBlock,
RichTextBlockQuoteBlock,
VideoBlock,
)
from search.utils import get_search_content_by_fields
from wagtail.admin.panels import FieldPanel, InlinePanel, MultiFieldPanel
from wagtail import blocks
from wagtail.fields import StreamField, RichTextField
from wagtail.models import Page, Orderable
class MarketingIndexPage(MetadataPageMixin, Page):
subtitle = models.CharField(
max_length=255,
null=True,
blank=True,
help_text="Appears immediately below page title."
)
body = StreamField(
[
('text', blocks.RichTextBlock()),
('image', AlignedImageBlock()),
('raw_html', blocks.RawHTMLBlock()),
('blockquote', RichTextBlockQuoteBlock()),
('list', blocks.ListBlock(
blocks.CharBlock(label="List Item"),
template='common/blocks/list_block_columns.html'
)),
('video', AlignedEmbedBlock()),
('media_file', VideoBlock()),
('heading_1', Heading1()),
('heading_2', Heading2()),
('heading_3', Heading3()),
],
null=True,
blank=True,
use_json_field=True,
)
subheader = models.CharField(
max_length=255,
default="How to install SecureDrop at your organization.",
help_text="Displayed below features."
)
how_to_install_subtitle = models.CharField(
max_length=255,
null=True,
blank=True,
help_text="Appears immediately below subheader."
)
how_to_install_body = StreamField(
[
('text', blocks.RichTextBlock()),
('image', AlignedImageBlock()),
('raw_html', blocks.RawHTMLBlock()),
('blockquote', RichTextBlockQuoteBlock()),
('list', blocks.ListBlock(
blocks.CharBlock(label="List Item"),
template='common/blocks/list_block_columns.html'
)),
('video', AlignedEmbedBlock()),
('media_file', VideoBlock()),
('heading_1', Heading1()),
('heading_2', Heading2()),
('heading_3', Heading3()),
],
null=True,
blank=True,
use_json_field=True,
)
content_panels = Page.content_panels + [
FieldPanel('subtitle'),
FieldPanel('body'),
InlinePanel('features', label="Features"),
MultiFieldPanel(
heading='How to install',
children=[
FieldPanel('subheader'),
FieldPanel('how_to_install_subtitle'),
FieldPanel('how_to_install_body'),
]
),
]
subpage_types = ['marketing.FeaturePage']
search_fields_pgsql = ['title', 'body', 'subheader']
def get_search_content(self):
search_elements = get_search_content_by_fields(self, self.search_fields_pgsql)
for feature in self.features.all():
search_elements.append(feature.feature.title)
search_elements.append(feature.feature.teaser_title)
search_elements.append(feature.feature.teaser_description)
return search_elements
class OrderedFeatures(Orderable):
page = ParentalKey(
'marketing.MarketingIndexPage',
related_name='features'
)
feature = models.ForeignKey(
'marketing.FeaturePage',
related_name='marketing_orders',
on_delete=models.CASCADE,
)
panels = [
FieldPanel('feature')
]
class Meta:
unique_together = (('page', 'feature'),)
class FeaturePage(MetadataPageMixin, Page):
teaser_title = models.CharField(max_length=60)
icon = models.ForeignKey(
'common.CustomImage',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+',
)
teaser_description = models.CharField(
max_length=255,
help_text="A one sentence description displayed with the feature overview."
)
description = RichTextField(
features=['bold', 'italic', 'ol', 'ul', 'hr', 'link', 'document-link', 'code'],
blank=True,
null=True
)
parent_page_types = ['marketing.MarketingIndexPage']
content_panels = Page.content_panels + [
FieldPanel('teaser_title'),
FieldPanel('icon'),
FieldPanel('teaser_description'),
FieldPanel('description'),
]
search_fields_pgsql = ['title', 'teaser_title', 'teaser_description', 'description']
def get_search_content(self):
return get_search_content_by_fields(self, self.search_fields_pgsql)
def METHOD_NAME(self):
return self.marketing_orders.get(page=self.get_parent()).METHOD_NAME
def next(self):
ordered_feature = OrderedFeatures.objects.filter(
page=self.get_parent(),
sort_order__gt=self.METHOD_NAME()).first()
if ordered_feature:
return ordered_feature.feature
return None
def previous(self):
ordered_feature = OrderedFeatures.objects.filter(
page=self.get_parent(),
sort_order__lt=self.METHOD_NAME()).last()
if ordered_feature:
return ordered_feature.feature
return None
def all_features(self):
return self.get_parent().specific.features.all()
def __str__(self):
return self.teaser_title |
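# Illustrative behaviour of the ordering helpers above (hypothetical page tree):
# given a MarketingIndexPage whose ordered features are [A, B, C], B.previous()
# would be expected to return A and B.next() to return C, based on the
# OrderedFeatures sort order maintained by Wagtail's InlinePanel.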
299,886 | test refresh objects bad param | # SPDX-License-Identifier: GPL-2.0-only
#
# This file is part of Nominatim. (https://nominatim.org)
#
# Copyright (C) 2022 by the Nominatim developer community.
# For a full list of authors see the git log.
"""
Tests for the command line interface wrapper for the refresh command.
"""
import pytest
import nominatim.tools.refresh
import nominatim.tools.postcodes
import nominatim.indexer.indexer
class TestRefresh:
@pytest.fixture(autouse=True)
def setup_cli_call(self, cli_call, temp_db, cli_tokenizer_mock):
self.call_nominatim = cli_call
self.tokenizer_mock = cli_tokenizer_mock
@pytest.mark.parametrize("command,func", [
('address-levels', 'load_address_levels_from_config'),
('wiki-data', 'import_wikipedia_articles'),
('importance', 'recompute_importance'),
('website', 'setup_website'),
])
def test_refresh_command(self, mock_func_factory, command, func):
func_mock = mock_func_factory(nominatim.tools.refresh, func)
assert self.call_nominatim('refresh', '--' + command) == 0
assert func_mock.called == 1
def test_refresh_word_count(self):
assert self.call_nominatim('refresh', '--word-count') == 0
assert self.tokenizer_mock.update_statistics_called
def test_refresh_word_tokens(self):
assert self.call_nominatim('refresh', '--word-tokens') == 0
assert self.tokenizer_mock.update_word_tokens_called
def test_refresh_postcodes(self, mock_func_factory, place_table):
func_mock = mock_func_factory(nominatim.tools.postcodes, 'update_postcodes')
idx_mock = mock_func_factory(nominatim.indexer.indexer.Indexer, 'index_postcodes')
assert self.call_nominatim('refresh', '--postcodes') == 0
assert func_mock.called == 1
assert idx_mock.called == 1
def test_refresh_postcodes_no_place_table(self):
# Do nothing without the place table
assert self.call_nominatim('refresh', '--postcodes') == 0
def test_refresh_create_functions(self, mock_func_factory):
func_mock = mock_func_factory(nominatim.tools.refresh, 'create_functions')
assert self.call_nominatim('refresh', '--functions') == 0
assert func_mock.called == 1
assert self.tokenizer_mock.update_sql_functions_called
def test_refresh_wikidata_file_not_found(self, monkeypatch):
monkeypatch.setenv('NOMINATIM_WIKIPEDIA_DATA_PATH', 'gjoiergjeroi345Q')
assert self.call_nominatim('refresh', '--wiki-data') == 1
def test_refresh_secondary_importance_file_not_found(self):
assert self.call_nominatim('refresh', '--secondary-importance') == 1
def test_refresh_secondary_importance_new_table(self, mock_func_factory):
mocks = [mock_func_factory(nominatim.tools.refresh, 'import_secondary_importance'),
mock_func_factory(nominatim.tools.refresh, 'create_functions')]
assert self.call_nominatim('refresh', '--secondary-importance') == 0
assert mocks[0].called == 1
assert mocks[1].called == 1
def test_refresh_importance_computed_after_wiki_import(self, monkeypatch):
calls = []
monkeypatch.setattr(nominatim.tools.refresh, 'import_wikipedia_articles',
lambda *args, **kwargs: calls.append('import') or 0)
monkeypatch.setattr(nominatim.tools.refresh, 'recompute_importance',
lambda *args, **kwargs: calls.append('update'))
assert self.call_nominatim('refresh', '--importance', '--wiki-data') == 0
assert calls == ['import', 'update']
@pytest.mark.parametrize('params', [('--data-object', 'w234'),
('--data-object', 'N23', '--data-object', 'N24'),
('--data-area', 'R7723'),
('--data-area', 'r7723', '--data-area', 'r2'),
('--data-area', 'R9284425', '--data-object', 'n1234567894567')])
def test_refresh_objects(self, params, mock_func_factory):
func_mock = mock_func_factory(nominatim.tools.refresh, 'invalidate_osm_object')
assert self.call_nominatim('refresh', *params) == 0
assert func_mock.called == len(params)/2
@pytest.mark.parametrize('func', ('--data-object', '--data-area'))
@pytest.mark.parametrize('param', ('234', 'a55', 'R 453', 'Rel'))
def METHOD_NAME(self, func, param, mock_func_factory):
func_mock = mock_func_factory(nominatim.tools.refresh, 'invalidate_osm_object')
assert self.call_nominatim('refresh', func, param) == 1
assert func_mock.called == 0 |
299,887 | test it lists organizations | from unittest import mock
from unittest.mock import call
import colander
import pytest
from h.models.group import (
GROUP_DESCRIPTION_MAX_LENGTH,
GROUP_NAME_MAX_LENGTH,
GROUP_NAME_MIN_LENGTH,
)
from h.models.organization import Organization
from h.schemas.forms.admin.group import AdminGroupSchema
class TestAdminGroupSchema:
def test_it_allows_with_valid_data(self, group_data, bound_schema):
bound_schema.deserialize(group_data)
def test_it_raises_if_name_too_short(self, group_data, bound_schema):
too_short_name = "a" * (GROUP_NAME_MIN_LENGTH - 1)
group_data["name"] = too_short_name
with pytest.raises(colander.Invalid, match=".*name.*"):
bound_schema.deserialize(group_data)
def test_it_raises_if_name_too_long(self, group_data, bound_schema):
too_long_name = "a" * (GROUP_NAME_MAX_LENGTH + 1)
group_data["name"] = too_long_name
with pytest.raises(colander.Invalid, match=".*name.*"):
bound_schema.deserialize(group_data)
def test_it_raises_if_description_too_long(self, group_data, bound_schema):
too_long_description = "a" * (GROUP_DESCRIPTION_MAX_LENGTH + 1)
group_data["description"] = too_long_description
with pytest.raises(colander.Invalid, match=".*description.*"):
bound_schema.deserialize(group_data)
def test_it_raises_if_group_type_invalid(self, group_data, bound_schema):
group_data["group_type"] = "foobarbazding"
with pytest.raises(colander.Invalid, match=".*group_type.*"):
bound_schema.deserialize(group_data)
@pytest.mark.parametrize("required_field", ("name", "group_type", "creator"))
def test_it_raises_if_required_field_missing(
self, group_data, bound_schema, required_field
):
group_data.pop(required_field)
with pytest.raises(colander.Invalid, match=f".*{required_field}.*"):
bound_schema.deserialize(group_data)
@pytest.mark.parametrize("optional_field", ("description",))
def test_it_allows_when_optional_field_missing(
self, group_data, bound_schema, optional_field
):
group_data.pop(optional_field)
bound_schema.deserialize(group_data)
@pytest.mark.parametrize(
"invalid_scope", ["not-a-url", "foo:123", "example.com", "example.com/bar"]
)
def test_it_raises_if_origin_invalid(self, group_data, bound_schema, invalid_scope):
group_data["scopes"] = [invalid_scope]
with pytest.raises(colander.Invalid, match="scope.*must be a complete URL"):
bound_schema.deserialize(group_data)
def test_it_raises_if_no_origins(self, group_data, bound_schema):
group_data["scopes"] = []
with pytest.raises(colander.Invalid, match="At least one scope"):
bound_schema.deserialize(group_data)
def test_it_raises_if_group_type_changed(
self, group_data, pyramid_csrf_request, org, user_service
):
group = mock.Mock(type="open")
group_data["group_type"] = "restricted"
schema = AdminGroupSchema().bind(
request=pyramid_csrf_request,
group=group,
user_svc=user_service,
organizations={org.pubid: org},
)
with pytest.raises(colander.Invalid, match="Changing group type"):
schema.deserialize(group_data)
def test_it_does_not_raise_if_group_type_is_same(
self, group_data, pyramid_csrf_request, org, user_service
):
group = mock.Mock(type="open")
group_data["group_type"] = "open"
schema = AdminGroupSchema().bind(
request=pyramid_csrf_request,
group=group,
user_svc=user_service,
organizations={org.pubid: org},
)
schema.deserialize(group_data)
def test_it_raises_if_member_invalid(self, group_data, bound_schema, user_service):
user_service.fetch.return_value = None
group_data["members"] = ["valid_user", "invalid_user"]
with pytest.raises(colander.Invalid, match="members.1"):
bound_schema.deserialize(group_data)
def test_it_passes_through_the_authority_when_checking_users(
self, group_data, bound_schema, user_service, third_party_org
):
group_data["organization"] = third_party_org.pubid
group_data["members"] = ["valid_user"]
group_data["creator"] = "valid_creator"
bound_schema.deserialize(group_data)
user_service.fetch.assert_has_calls(
(
# It's a bit of a shame to enshrine the order, as it really
# doesn't matter, but it's the easiest thing to do
call("valid_user", third_party_org.authority),
call("valid_creator", third_party_org.authority),
)
)
def test_it_allows_when_creator_exists_at_authority(self, group_data, bound_schema):
bound_schema.deserialize(group_data)
def test_it_passes_creator_and_authority_to_user_fetch(
self, group_data, bound_schema, user_service, org
):
bound_schema.deserialize(group_data)
user_service.fetch.assert_called_with(group_data["creator"], org.authority)
def test_it_allows_when_user_exists_at_authority(self, group_data, bound_schema):
bound_schema.deserialize(group_data)
def test_it_raises_when_the_creator_user_cannot_be_found(
self, group_data, bound_schema
):
"""
It raises if there's no user with the given username and authority.
It should raise if there's no user in the database with the same
username as entered into the form and the same authority as the
organization selected in the form.
"""
group_data["creator"] = "invalid_creator"
with pytest.raises(colander.Invalid, match="creator"):
bound_schema.deserialize(group_data)
def METHOD_NAME(self, bound_schema, org, third_party_org):
for child in bound_schema.children:
if child.name == "organization":
org_node = child
assert org_node.widget.values == [
(org.pubid, f"{org.name} ({org.authority})"),
(
third_party_org.pubid,
f"{third_party_org.name} ({third_party_org.authority})",
),
]
@pytest.fixture
def group_data(org):
"""
Return a serialized representation of the "Create Group" form.
This is the representation that Deform passes to Colander for
deserialization and validation after the HTML form is processed by
Peppercorn.
"""
return {
"name": "My Group",
"group_type": "open",
"creator": "valid_creator",
"description": "Lorem ipsum dolor sit amet consectetuer",
"organization": org.pubid,
"scopes": ["http://www.foo.com", "https://www.foo.com"],
"enforce_scope": True,
}
@pytest.fixture
def user_service(user_service, factories):
def fetch(username, authority): # pylint: disable=unused-argument
if "invalid" in username:
return False
return factories.User()
user_service.fetch.side_effect = fetch
return user_service
@pytest.fixture
def org(factories):
return factories.Organization()
@pytest.fixture
def third_party_org(db_session):
third_party_org = Organization(
name="3rd_party", pubid="3rd_party_id", authority="3rd_party_authority"
)
db_session.add(third_party_org)
return third_party_org
@pytest.fixture
def bound_schema(pyramid_csrf_request, org, third_party_org, user_service):
schema = AdminGroupSchema().bind(
request=pyramid_csrf_request,
user_svc=user_service,
organizations={org.pubid: org, third_party_org.pubid: third_party_org},
)
return schema |
299,888 | cache clear | try: # NOQA
from functools import lru_cache # NOQA @UnusedImport
except ImportError:
# backport of Python's 3.3 lru_cache, written by Raymond Hettinger and
# licensed under MIT license, from:
# <http://code.activestate.com/recipes/578078-py26-and-py30-backport-of-python-33s-lru-cache/>
# Should be removed when Django only supports Python 3.2 and above.
from collections import namedtuple
from functools import update_wrapper
from threading import RLock
_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"])
class _HashedSeq(list):
__slots__ = "hashvalue"
def __init__(self, tup, hash=hash): # NOQA @ReservedAssignment
self[:] = tup
self.hashvalue = hash(tup)
def __hash__(self):
return self.hashvalue
def _make_key(
args,
kwds,
typed,
kwd_mark=(object(),),
fasttypes=None,
sorted=sorted,
tuple=tuple,
type=type,
len=len,
):
if fasttypes is None:
fasttypes = {int, str, frozenset, type(None)}
"Make a cache key from optionally typed positional and keyword arguments"
key = args
if kwds:
sorted_items = sorted(kwds.items())
key += kwd_mark
for item in sorted_items:
key += item
if typed:
key += tuple(type(v) for v in args)
if kwds:
key += tuple(type(v) for __, v in sorted_items)
elif len(key) == 1 and type(key[0]) in fasttypes:
return key[0]
return _HashedSeq(key)
def lru_cache(maxsize=100, typed=False):
"""Least-recently-used cache decorator.
If *maxsize* is set to None, the LRU features are disabled and the cache
can grow without bound.
If *typed* is True, arguments of different types will be cached separately.
For example, f(3.0) and f(3) will be treated as distinct calls with
distinct results.
Arguments to the cached function must be hashable.
View the cache statistics named tuple (hits, misses, maxsize, currsize) with
f.cache_info(). Clear the cache and statistics with f.cache_clear().
Access the underlying function with f.__wrapped__.
See: https://en.wikipedia.org/wiki/Cache_algorithms#Least_Recently_Used
"""
# Users should only access the lru_cache through its public API:
# cache_info, cache_clear, and f.__wrapped__
# The internals of the lru_cache are encapsulated for thread safety and
# to allow the implementation to change (including a possible C
# version).
def decorating_function(user_function):
cache = {}
# make statistics updateable non-locally
stats = [0, 0]
HITS, MISSES = 0, 1 # names for the stats fields
make_key = _make_key
# bound method to lookup key or return None
cache_get = cache.get
# localize the global len() function
_len = len
# because linkedlist updates aren't threadsafe
lock = RLock()
# root of the circular doubly linked list
root = []
# initialize by pointing to self
root[:] = [root, root, None, None]
# make updateable non-locally
nonlocal_root = [root]
PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields
if maxsize == 0:
def wrapper(*args, **kwds):
# no caching, just do a statistics update after a
# successful call
result = user_function(*args, **kwds)
stats[MISSES] += 1
return result
elif maxsize is None:
def wrapper(*args, **kwds):
# simple caching without ordering or size limit
key = make_key(args, kwds, typed)
# root used here as a unique not-found sentinel
result = cache_get(key, root)
if result is not root:
stats[HITS] += 1
return result
result = user_function(*args, **kwds)
cache[key] = result
stats[MISSES] += 1
return result
else:
def wrapper(*args, **kwds):
# size limited caching that tracks accesses by recency
key = make_key(args, kwds, typed) if kwds or typed else args
with lock:
link = cache_get(key)
if link is not None:
# record recent use of the key by moving it to the
# front of the list
(root,) = nonlocal_root
link_prev, link_next, key, result = link
link_prev[NEXT] = link_next
link_next[PREV] = link_prev
last = root[PREV]
last[NEXT] = root[PREV] = link
link[PREV] = last
link[NEXT] = root
stats[HITS] += 1
return result
result = user_function(*args, **kwds)
with lock:
(root,) = nonlocal_root
if key in cache:
# getting here means that this same key was added to the
# cache while the lock was released. since the link
# update is already done, we need only return the
# computed result and update the count of misses.
pass
elif _len(cache) >= maxsize:
# use the old root to store the new key and result
oldroot = root
oldroot[KEY] = key
oldroot[RESULT] = result
# empty the oldest link and make it the new root
root = nonlocal_root[0] = oldroot[NEXT]
oldkey = root[KEY]
# oldvalue = root[RESULT]
root[KEY] = root[RESULT] = None
# now update the cache dictionary for the new links
del cache[oldkey]
cache[key] = oldroot
else:
# put result in a new link at the front of the list
last = root[PREV]
link = [last, root, key, result]
last[NEXT] = root[PREV] = cache[key] = link
stats[MISSES] += 1
return result
def cache_info():
"""Report cache statistics"""
with lock:
return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache))
def METHOD_NAME():
"""Clear the cache and cache statistics"""
with lock:
cache.clear()
root = nonlocal_root[0]
root[:] = [root, root, None, None]
stats[:] = [0, 0]
wrapper.__wrapped__ = user_function
wrapper.cache_info = cache_info
wrapper.METHOD_NAME = METHOD_NAME
return update_wrapper(wrapper, user_function)
return decorating_function |
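# Minimal usage sketch for the backported decorator (illustrative, mirroring
# functools.lru_cache semantics):
#
#   @lru_cache(maxsize=32)
#   def fib(n):
#       return n if n < 2 else fib(n - 1) + fib(n - 2)
#
#   fib(30)
#   fib.cache_info()   # e.g. CacheInfo(hits=28, misses=31, maxsize=32, currsize=31)
#   fib.METHOD_NAME()  # resets the cache and the statistics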
299,889 | get | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Optional, TypeVar
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import RecoveryServicesBackupClientMixinABC, _convert_request, _format_url_section
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
vault_name: str, resource_group_name: str, job_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", "2023-02-01"))
accept = _headers.pop("Accept", "application/json")
# Construct URL
_url = kwargs.pop(
"template_url",
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupJobs/{jobName}",
) # pylint: disable=line-too-long
path_format_arguments = {
"vaultName": _SERIALIZER.url("vault_name", vault_name, "str"),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, "str"),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str"),
"jobName": _SERIALIZER.url("job_name", job_name, "str"),
}
_url: str = _format_url_section(_url, **path_format_arguments) # type: ignore
# Construct parameters
_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
# Construct headers
_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=_url, params=_params, headers=_headers, **kwargs)
class JobDetailsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.recoveryservicesbackup.activestamp.RecoveryServicesBackupClient`'s
:attr:`job_details` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace
def METHOD_NAME(self, vault_name: str, resource_group_name: str, job_name: str, **kwargs: Any) -> _models.JobResource:
"""Gets extended information associated with the job.
:param vault_name: The name of the recovery services vault. Required.
:type vault_name: str
:param resource_group_name: The name of the resource group where the recovery services vault is
present. Required.
:type resource_group_name: str
:param job_name: Name of the job whose details are to be fetched. Required.
:type job_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: JobResource or the result of cls(response)
:rtype: ~azure.mgmt.recoveryservicesbackup.activestamp.models.JobResource
:raises ~azure.core.exceptions.HttpResponseError:
"""
error_map = {
401: ClientAuthenticationError,
404: ResourceNotFoundError,
409: ResourceExistsError,
304: ResourceNotModifiedError,
}
error_map.update(kwargs.pop("error_map", {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version: str = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))
cls: ClsType[_models.JobResource] = kwargs.pop("cls", None)
request = build_get_request(
vault_name=vault_name,
resource_group_name=resource_group_name,
job_name=job_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.METHOD_NAME.metadata["url"],
headers=_headers,
params=_params,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
_stream = False
pipeline_response: PipelineResponse = self._client._pipeline.run( # pylint: disable=protected-access
request, stream=_stream, **kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize("JobResource", pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
METHOD_NAME.metadata = {
"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupJobs/{jobName}"
} |
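# Illustrative call pattern (assumes a configured RecoveryServicesBackupClient;
# parameter values are placeholders):
#
#   job = client.job_details.METHOD_NAME(
#       vault_name="myVault", resource_group_name="myResourceGroup", job_name="<job-id>"
#   )
#   print(job.properties)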
299,890 | test automap4 | import os
import sys
sys.path.append(
os.path.normpath(
os.path.join(os.path.abspath(__file__), "..", "..", "..", "common")
)
)
from env_indigo import *
indigo = Indigo()
def testAutomap1():
print("*** Simple test 1 ***")
rxn1 = indigo.loadReaction(
"CC1=CSC(C=C(N=[N+]=[N-])C(OCC)=O)=C1>>CC1=CSC2=C1NC(C(OCC)=O)=C2"
)
rxn1.automap("discard")
print(rxn1.smiles())
def testAutomap2():
print("*** Simple test 2 ***")
rxn = indigo.createReaction()
rxn.addProduct(indigo.loadMolecule("C(CC1=CC=CC=C1)C1=CC=C(S1)C=O"))
rxn.addReactant(indigo.loadMolecule("C1(=CC=CC=C1)C#CC1=CC=C(S1)C=O"))
rxn.addReactant(indigo.loadMolecule("C(CC1=CC=CC=C1)C=1C=C(SC1)C=O"))
rxn.automap("discard")
print(rxn.smiles())
def testAutomap3():
print("*** Test reaction with single atom component mapping ***")
rxn = indigo.createReaction()
rxn.addReactant(indigo.loadMolecule("CC(OC)=O"))
rxn.addReactant(indigo.loadMolecule("[Na+].[OH-]"))
rxn.addProduct(indigo.loadMolecule("CC(O)=O"))
rxn.addProduct(indigo.loadMolecule("C[O-].[Na+]"))
rxn.automap("discard")
print(rxn.smiles())
def METHOD_NAME():
print("*** Testing foldHydrogens with automap ***")
reaction = indigo.createReaction()
reaction.addProduct(
indigo.loadMolecule("O=C1CCC[C@]2([H])C3C=CC(C3)[C@@]21[H]")
)
reaction.addReactant(indigo.loadMolecule("C1C=CC=C1"))
reaction.addReactant(indigo.loadMolecule("O=C1CCCC=C1"))
print(reaction.smiles())
reaction.foldHydrogens() # if folding is turned on then there is an error in automap
print(reaction.smiles())
reaction.automap("discard")
print(reaction.smiles())
def testAutomap5():
print("*** Test reaction with single atoms***")
rxn = indigo.createReaction()
rxn.addReactant(indigo.loadMolecule("CC(OC)=O"))
rxn.addReactant(indigo.loadMolecule("[Na+]"))
rxn.addProduct(indigo.loadMolecule("CC(O)=O"))
rxn.addProduct(indigo.loadMolecule("[Na+]"))
rxn.automap("discard")
print(rxn.smiles())
def testAutomap6():
print("*** Test query reaction ***")
qr = indigo.loadQueryReaction("C1=CC=CC=C1>>C1=CC=CC(CC2=CC=CC=C2)=C1")
qr.automap("discard")
print(qr.smiles())
def testAutomap(testname, reactants, products, mode="discard"):
print("*** %s ***" % testname)
rxn = indigo.createReaction()
for r in reactants:
rxn.addReactant(indigo.loadMolecule(r))
for p in products:
rxn.addProduct(indigo.loadMolecule(p))
print("Before:")
print(rxn.smiles())
rxn.automap(mode)
print("After:")
print(rxn.smiles())
def testAutomapR(testname, rxn_s, mode="discard"):
print("*** %s ***" % testname)
rxn = indigo.loadReaction(rxn_s)
print("Before:")
print(rxn.canonicalSmiles())
rxn.automap(mode)
print("After:")
print(rxn.canonicalSmiles())
def testAutomapQuery(testname, reactants, products, mode="discard"):
print("*** %s ***" % testname)
rxn = indigo.createQueryReaction()
for r in reactants:
rxn.addReactant(indigo.loadQueryMolecule(r))
for p in products:
rxn.addProduct(indigo.loadQueryMolecule(p))
print("Before:")
print(rxn.smiles())
rxn.automap(mode)
print("After:")
print(rxn.smiles())
testAutomap1()
testAutomap2()
testAutomap3()
METHOD_NAME()
testAutomap5()
testAutomap(
"AAM1",
["O=CN(C)C", "S(=O)(Cl)Cl", "OC(=O)c1ccccn1", "[OH-].[Na+]"],
["Cl.C(C)OC(=O)C1=NC=CC(=C1)Cl"],
)
testAutomap(
"AAM2",
["S(=O)(Cl)Cl", "OC(=O)c1ccccn1", "[OH-].[Na+]", "[NH4+].[OH-]"],
["ClC=1C=C(N=CC1)C(=O)N"],
)
testAutomap(
"AAM3",
["[H-].[Na+]", "N#Cc1ccccn1", "NC(N)=O", "S(O)(O)(=O)=O"],
["N1=C(C=CC=C1)C1=NC(=NC(=N1)C1=NC=CC=C1)O"],
)
testAutomap(
"AAM4",
["[H-].[Na+]", "N#Cc1ccccn1", "Cl.NC(=N)N"],
["N1=C(C=CC=C1)C1=NC(=NC(=N1)C1=NC=CC=C1)N"],
)
testAutomap("AAM5", ["C1CC1"], ["C1CCC1C"])
testAutomap("AAM6", ["C1CCC1C"], ["C1CC1"])
testAutomap(
"D-Y-exchange1",
["C1CC1", "C1CC1", "C1CC1"],
["CC(C)C", "CC(C)C", "CC(C)C"],
)
testAutomap("D-Y-exchange2", ["C1CC1", "CC(C)C"], ["CC(C)C", "C1CC1"])
testAutomap("D-Y-exchange3", ["C1C2C11C3CC213"], ["CC(C)C(C)C(C)C(C)C"])
testAutomap("CON1", ["CCCC", "NNNN.OOOO"], ["CCCC.NNNN", "OOOO"])
testAutomap("CON2", ["CCCC", "NNNN.NNNN"], ["CCCC", "NNNN"])
testAutomap("INGNORE CHARGES0", ["CCCCC"], ["CC[C++]CC"])
testAutomap(
"INGNORE CHARGES1", ["CCCCC"], ["CC[C++]CC"], "discard ignore_charges"
)
testAutomap("INGNORE ISOTOPE0", ["CCCCC"], ["CC[8CH2]CC"])
testAutomap(
"INGNORE ISOTOPE1", ["CCCCC"], ["CC[8CH2]CC"], "discard ignore_isotopes"
)
testAutomap("INGNORE VALENCE0", ["CC[GeH2]CC"], ["CC[Ge]CC"])
testAutomap(
"INGNORE VALENCE1", ["CC[GeH2]CC"], ["CC[Ge]CC"], "discard ignore_valence"
)
testAutomap("INGNORE RADICAL0", ["CCCCC"], ["CC[CH]CC"])
testAutomap(
"INGNORE RADICAL1", ["CCCCC"], ["CC[CH]CC"], "discard ignore_radicals"
)
testAutomap(
"AUTOMORPHISM KEEP ATOM DEGREE",
["BrC1=CC=CC=C1"],
["C1=CC=CC=C1C1CN(CCN1)C"],
"discard",
)
testAutomap(
"AUTOMORPHISM KEEP BOND ORDER",
["C=C1CC=C(C=C1)P(C1=CC=CC=C1)C1=CC=CC=C1"],
["C(NC1C=CC=C1)C1CC=C(C=C1)P(C1=CC=CC=C1)C1=CC=CC=C1"],
"discard",
)
testAutomap(
"AUTOMORPHISM KEEP ATOM NUMBER",
["COC(C1=C(C=CC(=C1)I)O)=O", "C(C)C(C(=O)O)Br"],
["COC(C1=C(C=CC(=C1)I)OCC(=O)OCC)=O"],
"discard",
)
testAutomap(
"AAM WITH DISSOCIATIONS",
[
"NCC(c1c2c(ccc(OC)c2)ccc1)CO",
"CCN(CC)CC",
"FC(F)(F)C(OC(=O)C(F)(F)F)=O",
],
["FC(F)(F)C(NCC(c1c2c(ccc(OC)c2)ccc1)CO)=O"],
"discard",
)
testAutomapQuery(
"QUERY AAM", ["C1=CC=CC=C1"], ["C1=CC=CC(CC2=CC=CC=C2)=C1"], "discard"
)
testAutomapR(
"Keep mapping in KEEP mode",
"C1CC[NH:2]CC1.C1CC[S:1]CC1>>C1CC2CC[S:2]CC2C[NH:1]1",
"KEEP",
)
testAutomapR(
"Keep mapping in ALTER mode",
"C1CC[NH:2]CC1.C1CC[S:1]CC1>>C1CC2CC[S:2]CC2C[NH:1]1",
"ALTER",
)
testAutomapR(
"Keep mapping in KEEP mode for radicals",
"C[12CH2:1]C(CCCC)[CH]CCCCCCC>>C[13CH2:1]C(CCCC)[C]CCCCCCCC |^1:7,^4:22|",
"KEEP",
) |
299,891 | test write esri ascii doctest two vars | #! /usr/bin/env python
import os
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from landlab import RasterModelGrid
from landlab.io import read_esri_ascii, write_esri_ascii
_TEST_DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
def test_write_esri_ascii_doctest_one_var(tmpdir):
grid = RasterModelGrid((4, 5), xy_spacing=(2.0, 2.0))
grid.at_node["air__temperature"] = np.arange(20.0)
with tmpdir.as_cwd():
files = write_esri_ascii("test.asc", grid)
assert [os.path.basename(name) for name in sorted(files)] == ["test.asc"]
assert sorted(os.listdir()) == sorted(files)
def METHOD_NAME(tmpdir):
grid = RasterModelGrid((4, 5), xy_spacing=(2.0, 2.0))
grid.at_node["air__temperature"] = np.arange(20.0)
with tmpdir.as_cwd():
grid.at_node["land_surface__elevation"] = np.arange(20.0)
files = write_esri_ascii("test.asc", grid)
assert [os.path.basename(name) for name in sorted(files)] == [
"test_air__temperature.asc",
"test_land_surface__elevation.asc",
]
assert sorted(os.listdir()) == sorted(files)
def test_grid_with_no_fields(tmpdir):
grid = RasterModelGrid((4, 5), xy_spacing=(2.0, 2.0))
with tmpdir.as_cwd(), pytest.raises(ValueError):
write_esri_ascii("test.asc", grid)
def test_grid_with_one_field(tmpdir):
grid = RasterModelGrid((4, 5), xy_spacing=(2.0, 2.0))
grid.add_field("air__temperature", np.arange(20.0), at="node")
with tmpdir.as_cwd():
files = write_esri_ascii("test.asc", grid)
assert files == ["test.asc"]
for fname in files:
assert os.path.isfile(fname)
def test_grid_with_two_fields(tmpdir):
grid = RasterModelGrid((4, 5), xy_spacing=(2.0, 2.0))
grid.add_field("air__temperature", np.arange(20.0), at="node")
grid.add_field("land_surface__elevation", np.arange(20.0), at="node")
with tmpdir.as_cwd():
files = write_esri_ascii("test.asc", grid)
files.sort()
assert files == [
"test_air__temperature.asc",
"test_land_surface__elevation.asc",
]
for fname in files:
assert os.path.isfile(fname)
def test_names_keyword_as_str(tmpdir):
grid = RasterModelGrid((4, 5), xy_spacing=(2.0, 2.0))
grid.add_field("air__temperature", np.arange(20.0), at="node")
grid.add_field("land_surface__elevation", np.arange(20.0), at="node")
with tmpdir.as_cwd():
files = write_esri_ascii("test.asc", grid, names="air__temperature")
assert files == ["test.asc"]
assert os.path.isfile("test.asc")
def test_names_keyword_as_list(tmpdir):
grid = RasterModelGrid((4, 5), xy_spacing=(2.0, 2.0))
grid.add_field("air__temperature", np.arange(20.0), at="node")
grid.add_field("land_surface__elevation", np.arange(20.0), at="node")
with tmpdir.as_cwd():
files = write_esri_ascii("test.asc", grid, names=["air__temperature"])
assert files == ["test.asc"]
assert os.path.isfile("test.asc")
def test_names_keyword_multiple_names(tmpdir):
grid = RasterModelGrid((4, 5), xy_spacing=(2.0, 2.0))
grid.add_field("air__temperature", np.arange(20.0), at="node")
grid.add_field("land_surface__elevation", np.arange(20.0), at="node")
with tmpdir.as_cwd():
files = write_esri_ascii(
"test.asc", grid, names=["air__temperature", "land_surface__elevation"]
)
files.sort()
assert files == [
"test_air__temperature.asc",
"test_land_surface__elevation.asc",
]
for fname in files:
assert os.path.isfile(fname)
def test_names_keyword_with_bad_name(tmpdir):
grid = RasterModelGrid((4, 5), xy_spacing=(2.0, 2.0))
grid.add_field("air__temperature", np.arange(20.0), at="node")
with tmpdir.as_cwd(), pytest.raises(ValueError):
write_esri_ascii("test.asc", grid, names="not_a_name")
def test_clobber_keyword(tmpdir):
grid = RasterModelGrid((4, 5), xy_spacing=(2.0, 2.0))
grid.add_field("air__temperature", np.arange(20.0), at="node")
with tmpdir.as_cwd():
write_esri_ascii("test.asc", grid)
with pytest.raises(ValueError):
write_esri_ascii("test.asc", grid)
with pytest.raises(ValueError):
write_esri_ascii("test.asc", grid, clobber=False)
write_esri_ascii("test.asc", grid, clobber=True)
def test_write_then_read(tmpdir):
grid = RasterModelGrid((4, 5), xy_spacing=(2.0, 2.0), xy_of_lower_left=(15.0, 10.0))
grid.add_field("air__temperature", np.arange(20.0), at="node")
with tmpdir.as_cwd():
write_esri_ascii("test.asc", grid)
new_grid, field = read_esri_ascii("test.asc")
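        # Round-trip check: the grid read back should reproduce the original shape,
        # spacing, lower-left origin, node coordinates, and field values.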
assert grid.number_of_node_columns == new_grid.number_of_node_columns
assert grid.number_of_node_rows == new_grid.number_of_node_rows
assert grid.dx == new_grid.dx
assert (grid.x_of_node.min(), grid.y_of_node.min()) == (15.0, 10.0)
assert_array_almost_equal(grid.node_x, new_grid.node_x)
assert_array_almost_equal(grid.node_y, new_grid.node_y)
assert_array_almost_equal(field, grid.at_node["air__temperature"]) |
299,892 | train step | """Tests for calling optimizer on ParameterServerStrategy."""
import tensorflow.compat.v2 as tf
from absl.testing import parameterized
import keras
from keras.optimizers import adadelta
from keras.optimizers import adagrad
from keras.optimizers import adam
from keras.optimizers import adamax
from keras.optimizers import adamw
from keras.optimizers import ftrl
from keras.optimizers import lion
from keras.optimizers import nadam
from keras.optimizers import rmsprop
from keras.optimizers import sgd
from keras.utils import dataset_creator
from keras.utils import losses_utils
ds_combinations = tf.__internal__.distribute.combinations
STRATEGIES = [
ds_combinations.parameter_server_strategy_3worker_2ps_cpu,
ds_combinations.parameter_server_strategy_3worker_2ps_1gpu,
]
adadelta_fn = tf.__internal__.test.combinations.NamedObject(
"adadelta",
lambda: adadelta.Adadelta(
0.002, use_ema=True, ema_overwrite_frequency=None
),
)
adagrad_fn = tf.__internal__.test.combinations.NamedObject(
"adagrad", lambda: adagrad.Adagrad(0.002)
)
adam_fn = tf.__internal__.test.combinations.NamedObject(
"adam", lambda: adam.Adam(0.002)
)
adamax_fn = tf.__internal__.test.combinations.NamedObject(
"adamax", lambda: adamax.Adamax(0.002)
)
adamw_fn = tf.__internal__.test.combinations.NamedObject(
"adamw", lambda: adamw.AdamW(0.002, weight_decay=0.004)
)
ftrl_fn = tf.__internal__.test.combinations.NamedObject(
"ftrl", lambda: ftrl.Ftrl(0.002)
)
lion_fn = tf.__internal__.test.combinations.NamedObject(
"lion", lambda: lion.Lion(0.002)
)
nadam_fn = tf.__internal__.test.combinations.NamedObject(
"experimentnadam", lambda: nadam.Nadam(0.002)
)
rmsprop_fn = tf.__internal__.test.combinations.NamedObject(
"rmsprop", lambda: rmsprop.RMSprop(0.002)
)
sgd_fn = tf.__internal__.test.combinations.NamedObject(
"sgdaverage",
lambda: sgd.SGD(0.002, use_ema=True, ema_overwrite_frequency=1),
)
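# Each optimizer above is wrapped in a NamedObject so the parameterized test names
# stay readable; the lambdas defer construction until inside the strategy scope.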
OPTIMIZER_FN = [
adadelta_fn,
adagrad_fn,
adam_fn,
adamax_fn,
adamw_fn,
ftrl_fn,
lion_fn,
nadam_fn,
rmsprop_fn,
sgd_fn,
]
# TODO(b/228209527): Combine this test with optimizer_test after
# fixing the NCCL issue.
class OptimizerPssTest(tf.test.TestCase, parameterized.TestCase):
def _get_model(self):
return keras.Sequential(
[keras.layers.Input(shape=(1,)), keras.layers.Dense(1)]
)
def _get_dataset_fn(self):
def dataset_fn(_):
x, y = [1, 1, 1, 0, 0, 0], [1, 1, 1, 0, 0, 0]
ds = tf.data.Dataset.from_tensor_slices((x, y))
ds = ds.repeat().batch(6)
return ds
return dataset_fn
def _verify_accumulators_updated(self, optimizer):
variables = optimizer.variables
for var in variables:
if "iteration" not in var.name and "learning_rate" not in var.name:
                # For a variable other than `iteration` or `learning_rate`, verify
                # that its value has been updated (is not all zeros).
self.assertNotAllEqual(var, 0)
@ds_combinations.generate(
tf.__internal__.test.combinations.combine(
strategy=STRATEGIES, optimizer_fn=OPTIMIZER_FN
)
)
def testGetGradientsInModelPss(self, strategy, optimizer_fn):
with strategy.scope():
model = self._get_model()
optimizer = optimizer_fn()
ds_fn = self._get_dataset_fn()
if isinstance(strategy, tf.distribute.ParameterServerStrategy):
ds = dataset_creator.DatasetCreator(ds_fn)
else:
ds = ds_fn(None)
model.compile(loss="mse", optimizer=optimizer)
model.fit(ds, epochs=1, steps_per_epoch=5)
self._verify_accumulators_updated(optimizer)
@ds_combinations.generate(
tf.__internal__.test.combinations.combine(
strategy=STRATEGIES, optimizer_fn=OPTIMIZER_FN
)
)
def testGetGradientsInCustomTrainingLoopPss(self, strategy, optimizer_fn):
coordinator = tf.distribute.experimental.coordinator.ClusterCoordinator(
strategy
)
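        # The cluster coordinator schedules tf.functions onto remote workers, while
        # model and optimizer variables live on the parameter servers.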
with strategy.scope():
model = self._get_model()
optimizer = optimizer_fn()
def per_worker_dataset_fn():
return strategy.distribute_datasets_from_function(
self._get_dataset_fn()
)
ds = coordinator.create_per_worker_dataset(per_worker_dataset_fn)
@tf.function
def METHOD_NAME(iterator):
def replica_fn(data):
features, labels = data
with tf.GradientTape() as tape:
output = model(tf.expand_dims(features, axis=1))
loss = keras.losses.MeanSquaredError(
reduction=losses_utils.ReductionV2.NONE
)(labels, output)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(
zip(grads, model.trainable_variables)
)
strategy.run(replica_fn, args=(next(iterator),))
for _ in range(3):
coordinator.schedule(METHOD_NAME, args=(iter(ds),))
coordinator.join()
self.assertEqual(self.evaluate(optimizer.iterations), 3)
self._verify_accumulators_updated(optimizer)
if __name__ == "__main__":
tf.__internal__.distribute.multi_process_runner.test_main() |
299,893 | save csv file in export file | import uuid
from datetime import date, datetime
from tempfile import NamedTemporaryFile
from typing import IO, TYPE_CHECKING, Any, Dict, List, Set, Union
import petl as etl
from django.utils import timezone
from ...giftcard.models import GiftCard
from ...plugins.manager import get_plugins_manager
from ...product.models import Product
from .. import FileTypes
from ..notifications import send_export_download_link_notification
from .product_headers import get_product_export_fields_and_headers_info
from .products_data import get_products_data
if TYPE_CHECKING:
from django.db.models import QuerySet
from ..models import ExportFile
BATCH_SIZE = 10000
def export_products(
export_file: "ExportFile",
scope: Dict[str, Union[str, dict]],
export_info: Dict[str, list],
file_type: str,
delimiter: str = ",",
):
from ...graphql.product.filters import ProductFilter
file_name = get_filename("product", file_type)
queryset = get_queryset(Product, ProductFilter, scope)
(
export_fields,
file_headers,
data_headers,
) = get_product_export_fields_and_headers_info(export_info)
temporary_file = create_file_with_headers(file_headers, delimiter, file_type)
export_products_in_batches(
queryset,
export_info,
set(export_fields),
data_headers,
delimiter,
temporary_file,
file_type,
)
METHOD_NAME(export_file, temporary_file, file_name)
temporary_file.close()
send_export_download_link_notification(export_file, "products")
manager = get_plugins_manager()
manager.product_export_completed(export_file)
def export_gift_cards(
export_file: "ExportFile",
scope: Dict[str, Union[str, dict]],
file_type: str,
delimiter: str = ",",
):
from ...graphql.giftcard.filters import GiftCardFilter
file_name = get_filename("gift_card", file_type)
queryset = get_queryset(GiftCard, GiftCardFilter, scope)
    # only unused gift card codes can be exported
queryset = queryset.filter(used_by_email__isnull=True)
export_fields = ["code"]
temporary_file = create_file_with_headers(export_fields, delimiter, file_type)
export_gift_cards_in_batches(
queryset,
export_fields,
delimiter,
temporary_file,
file_type,
)
METHOD_NAME(export_file, temporary_file, file_name)
temporary_file.close()
send_export_download_link_notification(export_file, "gift cards")
manager = get_plugins_manager()
manager.gift_card_export_completed(export_file)
def get_filename(model_name: str, file_type: str) -> str:
hash = uuid.uuid4()
return "{}_data_{}_{}.{}".format(
model_name, timezone.now().strftime("%d_%m_%Y_%H_%M_%S"), hash, file_type
)
def get_queryset(model, filter, scope: Dict[str, Union[str, dict]]) -> "QuerySet":
queryset = model.objects.all()
if "ids" in scope:
queryset = model.objects.filter(pk__in=scope["ids"])
elif "filter" in scope:
queryset = filter(data=parse_input(scope["filter"]), queryset=queryset).qs
queryset = queryset.order_by("pk")
return queryset
def parse_input(data: Any) -> Dict[str, Union[str, dict]]:
"""Parse input into correct data types.
Scope coming from Celery will be passed as strings.
"""
if "attributes" in data:
serialized_attributes = []
for attr in data.get("attributes") or []:
if "date_time" in attr:
if gte := attr["date_time"].get("gte"):
attr["date_time"]["gte"] = datetime.fromisoformat(gte)
if lte := attr["date_time"].get("lte"):
attr["date_time"]["lte"] = datetime.fromisoformat(lte)
if "date" in attr:
if gte := attr["date"].get("gte"):
attr["date"]["gte"] = date.fromisoformat(gte)
if lte := attr["date"].get("lte"):
attr["date"]["lte"] = date.fromisoformat(lte)
serialized_attributes.append(attr)
if serialized_attributes:
data["attributes"] = serialized_attributes
return data
def create_file_with_headers(file_headers: List[str], delimiter: str, file_type: str):
table = etl.wrap([file_headers])
if file_type == FileTypes.CSV:
temp_file = NamedTemporaryFile("ab+", suffix=".csv")
etl.tocsv(table, temp_file.name, delimiter=delimiter)
else:
temp_file = NamedTemporaryFile("ab+", suffix=".xlsx")
etl.io.xlsx.toxlsx(table, temp_file.name)
return temp_file
def export_products_in_batches(
queryset: "QuerySet",
export_info: Dict[str, list],
export_fields: Set[str],
headers: List[str],
delimiter: str,
temporary_file: Any,
file_type: str,
):
warehouses = export_info.get("warehouses")
attributes = export_info.get("attributes")
channels = export_info.get("channels")
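    # Each batch is re-fetched with prefetch_related so attribute, variant, media,
    # and category lookups inside get_products_data avoid extra per-product queries.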
for batch_pks in queryset_in_batches(queryset):
product_batch = Product.objects.filter(pk__in=batch_pks).prefetch_related(
"attributes",
"variants",
"collections",
"media",
"product_type",
"category",
)
export_data = get_products_data(
product_batch, export_fields, attributes, warehouses, channels
)
append_to_file(export_data, headers, temporary_file, file_type, delimiter)
def export_gift_cards_in_batches(
queryset: "QuerySet",
export_fields: List[str],
delimiter: str,
temporary_file: Any,
file_type: str,
):
for batch_pks in queryset_in_batches(queryset):
gift_card_batch = GiftCard.objects.filter(pk__in=batch_pks)
export_data = list(gift_card_batch.values(*export_fields))
append_to_file(export_data, export_fields, temporary_file, file_type, delimiter)
def queryset_in_batches(queryset):
"""Slice a queryset into batches.
    Input queryset should be sorted by pk.
"""
start_pk = 0
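    # Keyset pagination: take the next BATCH_SIZE rows with pk greater than the
    # last pk already seen, which avoids expensive large OFFSET queries.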
while True:
qs = queryset.order_by("pk").filter(pk__gt=start_pk)[:BATCH_SIZE]
pks = list(qs.values_list("pk", flat=True))
if not pks:
break
yield pks
start_pk = pks[-1]
def append_to_file(
export_data: List[Dict[str, Union[str, bool]]],
headers: List[str],
temporary_file: Any,
file_type: str,
delimiter: str,
):
table = etl.fromdicts(export_data, header=headers, missing="")
if file_type == FileTypes.CSV:
etl.io.csv.appendcsv(table, temporary_file.name, delimiter=delimiter)
else:
etl.io.xlsx.appendxlsx(table, temporary_file.name)
def METHOD_NAME(
export_file: "ExportFile", temporary_file: IO[bytes], file_name: str
):
export_file.content_file.save(file_name, temporary_file) |
299,894 | forward | # based on https://github.com/isl-org/MiDaS
import cv2
import torch
import torch.nn as nn
from torchvision.transforms import Compose
from ldm.modules.midas.midas.dpt_depth import DPTDepthModel
from ldm.modules.midas.midas.midas_net import MidasNet
from ldm.modules.midas.midas.midas_net_custom import MidasNet_small
from ldm.modules.midas.midas.transforms import Resize, NormalizeImage, PrepareForNet
ISL_PATHS = {
"dpt_large": "midas_models/dpt_large-midas-2f21e586.pt",
"dpt_hybrid": "midas_models/dpt_hybrid-midas-501f0c75.pt",
"midas_v21": "",
"midas_v21_small": "",
}
def disabled_train(self, mode=True):
"""Overwrite model.train with this function to make sure train/eval mode
does not change anymore."""
return self
def load_midas_transform(model_type):
# https://github.com/isl-org/MiDaS/blob/master/run.py
# load transform only
if model_type == "dpt_large": # DPT-Large
net_w, net_h = 384, 384
resize_mode = "minimal"
normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
elif model_type == "dpt_hybrid": # DPT-Hybrid
net_w, net_h = 384, 384
resize_mode = "minimal"
normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
elif model_type == "midas_v21":
net_w, net_h = 384, 384
resize_mode = "upper_bound"
normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
elif model_type == "midas_v21_small":
net_w, net_h = 256, 256
resize_mode = "upper_bound"
normalization = NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
else:
assert False, f"model_type '{model_type}' not implemented, use: --model_type large"
transform = Compose(
[
Resize(
net_w,
net_h,
resize_target=None,
keep_aspect_ratio=True,
ensure_multiple_of=32,
resize_method=resize_mode,
image_interpolation_method=cv2.INTER_CUBIC,
),
normalization,
PrepareForNet(),
]
)
return transform
def load_model(model_type):
# https://github.com/isl-org/MiDaS/blob/master/run.py
# load network
model_path = ISL_PATHS[model_type]
if model_type == "dpt_large": # DPT-Large
model = DPTDepthModel(
path=model_path,
backbone="vitl16_384",
non_negative=True,
)
net_w, net_h = 384, 384
resize_mode = "minimal"
normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
elif model_type == "dpt_hybrid": # DPT-Hybrid
model = DPTDepthModel(
path=model_path,
backbone="vitb_rn50_384",
non_negative=True,
)
net_w, net_h = 384, 384
resize_mode = "minimal"
normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
elif model_type == "midas_v21":
model = MidasNet(model_path, non_negative=True)
net_w, net_h = 384, 384
resize_mode = "upper_bound"
normalization = NormalizeImage(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
elif model_type == "midas_v21_small":
model = MidasNet_small(model_path, features=64, backbone="efficientnet_lite3", exportable=True,
non_negative=True, blocks={'expand': True})
net_w, net_h = 256, 256
resize_mode = "upper_bound"
normalization = NormalizeImage(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
)
else:
print(f"model_type '{model_type}' not implemented, use: --model_type large")
assert False
transform = Compose(
[
Resize(
net_w,
net_h,
resize_target=None,
keep_aspect_ratio=True,
ensure_multiple_of=32,
resize_method=resize_mode,
image_interpolation_method=cv2.INTER_CUBIC,
),
normalization,
PrepareForNet(),
]
)
return model.eval(), transform
class MiDaSInference(nn.Module):
MODEL_TYPES_TORCH_HUB = [
"DPT_Large",
"DPT_Hybrid",
"MiDaS_small"
]
MODEL_TYPES_ISL = [
"dpt_large",
"dpt_hybrid",
"midas_v21",
"midas_v21_small",
]
def __init__(self, model_type):
super().__init__()
assert (model_type in self.MODEL_TYPES_ISL)
model, _ = load_model(model_type)
self.model = model
self.model.train = disabled_train
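        # Replace .train() with a no-op so the depth network stays in eval mode
        # (BatchNorm/Dropout behaviour cannot be flipped by downstream code).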
def METHOD_NAME(self, x):
# x in 0..1 as produced by calling self.transform on a 0..1 float64 numpy array
# NOTE: we expect that the correct transform has been called during dataloading.
with torch.no_grad():
prediction = self.model(x)
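            # The network predicts depth at its own internal resolution; upsample
            # the map back to the spatial size of the input batch.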
prediction = torch.nn.functional.interpolate(
prediction.unsqueeze(1),
size=x.shape[2:],
mode="bicubic",
align_corners=False,
)
assert prediction.shape == (x.shape[0], 1, x.shape[2], x.shape[3])
return prediction
|
299,895 | calc line col | import re
import traceback
import sys
import modgrammar
_whitespace_re = re.compile(r'\s+')
def update_best_error(current_best, err):
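    # Keep whichever error is at the furthest position in the input; on a tie,
    # merge the sets of expected grammar nodes so all alternatives are reported.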
if not current_best:
return err
errpos = err[0]
bestpos = current_best[0]
if errpos > bestpos:
return err
if errpos == bestpos:
current_best[1].update(err[1])
return current_best
def best_error_result(err_list):
if len(err_list) == 1:
# This will be by far the most common case, so check it first.
err = err_list[0]
else:
pos = max((x[0] for x in err_list))
nodes = set().union(*(x[1] for x in err_list if x[0] == pos))
err = (pos, nodes)
return (False, err)
def error_result(index, node):
if not isinstance(node, set):
node = set([node])
return (False, (index, node))
def regularize(grammar):
if hasattr(grammar, 'grammar_parse'):
return (grammar,)
if isinstance(grammar, str):
return (modgrammar.LITERAL(grammar),)
if grammar is None:
return (modgrammar.EMPTY,)
try:
result = []
for g in grammar:
result.extend(regularize(g))
return tuple(result)
except TypeError:
raise modgrammar.GrammarDefError(
"object of type '%s' cannot be converted to Grammar" % (type(grammar).__name__,))
_anongrammar_attrs = ('grammar_collapse', 'grammar_desc', 'grammar_name', 'grammar_whitespace', 'grammar_tags')
def is_simple_anongrammar(cls):
if not issubclass(cls, modgrammar.AnonGrammar):
return False
for attr in _anongrammar_attrs:
if getattr(cls, attr) != getattr(modgrammar.AnonGrammar, attr):
return False
return True
def add_grammar(one, two):
one = regularize(one)
two = regularize(two)
if len(one) == 1 and is_simple_anongrammar(one[0]):
one = one[0].grammar
if len(two) == 1 and is_simple_anongrammar(two[0]):
two = two[0].grammar
return modgrammar.GRAMMAR(*(one + two))
classdict_map = dict(count='grammar_count', min='grammar_min', max='grammar_max', collapse='grammar_collapse',
collapse_skip='grammar_collapse_skip', tags='grammar_tags', greedy='grammar_greedy',
whitespace='grammar_whitespace')
def make_classdict(base, grammar, kwargs, **defaults):
cdict = {}
for d in defaults, kwargs:
for key, value in d.items():
key = classdict_map.get(key, key)
cdict[key] = value
cdict['grammar'] = grammar
if not "grammar_whitespace" in cdict and base.grammar_whitespace is None:
mdict = get_calling_module().__dict__
whitespace = mdict.get("grammar_whitespace", modgrammar.grammar_whitespace)
cdict["grammar_whitespace"] = whitespace
return cdict
def METHOD_NAME(string, count, line=0, col=0, tabs=1):
pos = 0
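    # Count newlines up to `count` to find the line, then derive the column from
    # the final partial line, expanding tabs when a tab width other than 1 is used.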
while True:
p = string.find('\n', pos, count) + 1
if not p:
break
pos = p
line += 1
col = 0
if tabs != 1:
lastline = (" " * col) + string[pos:count]
lastline = lastline.expandtabs(tabs)
col = len(lastline)
else:
col += count - pos
return (line, col)
def get_calling_module(stack=None):
if stack is None:
stack = traceback.extract_stack(None)
for s in reversed(stack):
filename = s[0]
if filename == "<stdin>":
return sys.modules["__main__"]
elif __file__.startswith(filename) or modgrammar.__file__.startswith(filename):
            continue  # skip frames from this module or modgrammar (startswith also matches .pyc paths)
else:
for m in sys.modules.values():
if getattr(m, "__file__", '').startswith(filename):
return m
# For some reason, we weren't able to determine the module. Not much we
# can do here..
return None
class RepeatingTuple(tuple):
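    # Acts like a tuple whose first element differs from every later element without
    # materialising the repeats; __len__ reports the stored logical length.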
def __new__(cls, first_item, successive_items, len=None):
o = tuple.__new__(cls, [first_item, successive_items])
o.len = len
return o
def __getitem__(self, index):
if self.len is not None and index >= self.len:
raise IndexError('tuple index out of range')
if index == 0:
return tuple.__getitem__(self, 0)
else:
return tuple.__getitem__(self, 1)
def __len__(self):
return self.len
def get_ebnf_names(glist, opts):
names = []
nts = []
for g in glist:
if g not in nts:
nts.append(g)
name, ntlist = g.grammar_ebnf_lhs(opts)
names.append(name)
for nt in ntlist:
if nt not in nts:
nts.append(nt)
return (names, nts)
def ebnf_specialseq(grammar, opts, name=None, details=None, desc=None):
style = opts['special_style']
if style == 'python':
text = details or grammar.grammar_details()
elif style == 'name':
text = name or grammar.grammar_name
else:
text = desc or getattr(grammar, 'grammar_desc', None)
if not text:
text = name or grammar.grammar_name
return '? {0} ?'.format(text) |
299,896 | gen hostdev migratable | import os
import re
import shutil
from aexpect import remote
from provider.migration import base_steps
from provider.sriov import check_points
from provider.sriov import sriov_base
from virttest import data_dir
from virttest.libvirt_xml import vm_xml
from virttest.utils_test import libvirt
def run(test, params, env):
"""
Migrate vm with failover settings.
:param test: test object
:param params: Dictionary with the test parameters
:param env: Dictionary with test environment.
"""
def METHOD_NAME(hostdev_dev):
"""
Generate a migratable xml for the VM with hostdev device
:param hostdev_dev: hostdev device object
"""
remote_pwd = params.get("migrate_dest_pwd")
remote_ip = params.get("migrate_dest_host")
remote_user = params.get("remote_user", "root")
guest_xml = vm_xml.VMXML.new_from_dumpxml(vm.name,
options="--migratable")
guest_xml.remove_all_device_by_type("hostdev")
guest_xml.add_device(hostdev_dev)
guest_xml.xmltreefile.write()
tmp_dir = data_dir.get_tmp_dir()
xmlfile = os.path.join(tmp_dir, "xml_file")
shutil.copyfile(guest_xml.xml, xmlfile)
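        # Copy the rebuilt guest XML to the destination host so it can be passed
        # to virsh migrate through the --xml option below.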
remote_session.cmd("mkdir -p %s" % tmp_dir)
remote.scp_to_remote(remote_ip, '22', remote_user, remote_pwd,
xmlfile, xmlfile, limit="",
log_filename=None, timeout=600,
interface=None)
params["virsh_migrate_extra"] += "--xml %s" % xmlfile
def setup_test():
"""
Test setup
"""
iface_dict = sriov_src_obj.parse_iface_dict()
sriov_dest_obj.setup_failover_test(**test_dict)
sriov_src_obj.setup_failover_test(**test_dict)
iface_dev = sriov_src_obj.create_iface_dev(dev_type, iface_dict)
libvirt.add_vm_device(vm_xml.VMXML.new_from_dumpxml(vm_name), iface_dev)
libvirt.set_vm_disk(vm, params)
verify_network()
if cmd_during_mig:
vm_session = vm.wait_for_serial_login(timeout=240)
vm_session.cmd(cmd_during_mig)
vm_session.close()
if dev_type == "hostdev_device":
iface_dict = sriov_dest_obj.parse_iface_dict()
hostdev_dev = sriov_dest_obj.create_iface_dev(dev_type, iface_dict)
METHOD_NAME(hostdev_dev)
def verify_network():
"""
Verify network function
"""
vm.cleanup_serial_console()
vm.create_serial_console()
vm_session = vm.wait_for_serial_login(timeout=240)
check_points.check_vm_iface_num(vm_session, expr_iface_no,
timeout=40, first=15)
check_points.check_vm_network_accessed(vm_session,
tcpdump_iface=br_name,
tcpdump_status_error=True)
br_name = params.get("br_name")
dev_type = params.get("dev_type", "")
cmd_during_mig = params.get("cmd_during_mig")
expr_iface_no = int(params.get("expr_iface_no", '3'))
vm_tmp_file = params.get("vm_tmp_file")
status_error = "yes" == params.get("status_error", "no")
vm_name = params.get("migrate_main_vm")
vm = env.get_vm(vm_name)
server_ip = params.get("server_ip")
server_user = params.get("server_user", "root")
server_pwd = params.get("server_pwd")
remote_session = remote.wait_for_login('ssh', server_ip, '22',
server_user, server_pwd,
r"[\#\$]\s*$")
migration_obj = base_steps.MigrationBase(test, vm, params)
sriov_src_obj = sriov_base.SRIOVTest(vm, test, params)
sriov_dest_obj = sriov_base.SRIOVTest(
vm, test, params, session=remote_session)
test_dict = sriov_src_obj.parse_iommu_test_params()
try:
setup_test()
migration_obj.run_migration()
migration_obj.verify_default()
verify_network()
if not status_error:
migration_obj.run_migration_back()
verify_network()
if vm_tmp_file:
vm_session = vm.wait_for_serial_login(timeout=240)
cmd_result = vm_session.cmd_output('cat %s' % vm_tmp_file)
if re.findall('Destination Host Unreachable', cmd_result, re.M):
err_msg = ("The network does not work well during the "
"migration period. Ping output: %s" % cmd_result)
test.fail(err_msg)
finally:
migration_obj.cleanup_default()
sriov_dest_obj.teardown_failover_test(**test_dict)
sriov_src_obj.teardown_failover_test(**test_dict) |
299,897 | flush | import os
import sys
import logging
import traceback
from mindsdb.utilities.config import Config
from functools import partial
class LoggerWrapper(object):
def __init__(self, writer_arr, default_writer_pos):
self._writer_arr = writer_arr
self.default_writer_pos = default_writer_pos
def write(self, message):
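        # Route the message to a writer based on the level tag embedded in the
        # text; anything without a recognised tag goes to the default writer.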
if len(message.strip(' \n')) == 0:
return
if 'DEBUG:' in message:
self._writer_arr[0](message)
elif 'INFO:' in message:
self._writer_arr[1](message)
elif 'WARNING:' in message:
self._writer_arr[2](message)
elif 'ERROR:' in message:
self._writer_arr[3](message)
else:
self._writer_arr[self.default_writer_pos](message)
def METHOD_NAME(self):
pass
def isatty(self):
return True # assumes terminal attachment
def fileno(self):
return 1 # stdout
# class DbHandler(logging.Handler):
# def __init__(self):
# logging.Handler.__init__(self)
# self.company_id = os.environ.get('MINDSDB_COMPANY_ID', None)
#
# def emit(self, record):
# self.format(record)
# if (
# len(record.message.strip(' \n')) == 0
# or (record.threadName == 'ray_print_logs' and 'mindsdb-logger' not in record.message)
# ):
# return
#
# log_type = record.levelname
# source = f'file: {record.pathname} - line: {record.lineno}'
# payload = record.msg
#
# if telemtry_enabled:
# pass
# # @TODO: Enable once we are sure no sensitive info is being outputed in the logs
# # if log_type in ['INFO']:
# # add_breadcrumb(
# # category='auth',
# # message=str(payload),
# # level='info',
# # )
# # Might be too much traffic if we send this for users with slow networks
# # if log_type in ['DEBUG']:
# # add_breadcrumb(
# # category='auth',
# # message=str(payload),
# # level='debug',
# # )
#
# if log_type in ['ERROR', 'WARNING']:
# trace = str(traceback.format_stack(limit=20))
# trac_log = Log(log_type='traceback', source=source, payload=trace, company_id=self.company_id)
# session.add(trac_log)
# session.commit()
#
# if telemtry_enabled:
# add_breadcrumb(
# category='stack_trace',
# message=trace,
# level='info',
# )
# if log_type in ['ERROR']:
# capture_message(str(payload))
# if log_type in ['WARNING']:
# capture_message(str(payload))
#
# log = Log(log_type=str(log_type), source=source, payload=str(payload), company_id=self.company_id)
# session.add(log)
# session.commit()
# default logger
logger = logging.getLogger('dummy')
def initialize_log(config=None, logger_name='main', wrap_print=False):
    ''' Create new logger
    :param config: object, app config
    :param logger_name: str, name of logger
    :param wrap_print: bool, if true, then print() calls will be wrapped by log.debug() function.
    '''
    global logger
    if config is None:
        config = Config().get_all()
    telemtry_enabled = os.getenv('CHECK_FOR_UPDATES', '1').lower() not in ['0', 'false', 'False']
    if telemtry_enabled:
        try:
            import sentry_sdk
            from sentry_sdk import capture_message, add_breadcrumb
            sentry_sdk.init(
                "https://29e64dbdf325404ebf95473d5f4a54d3@o404567.ingest.sentry.io/5633566",
                traces_sample_rate=0 # Set to `1` to experiment with performance metrics
            )
        except (ImportError, ModuleNotFoundError) as e:
            raise Exception(f"to use telemetry please install 'pip install mindsdb[telemetry]': {e}")
log = logging.getLogger(f'mindsdb.{logger_name}')
log.propagate = False
log.setLevel(min(
getattr(logging, config['log']['level']['console']),
getattr(logging, config['log']['level']['file'])
))
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
console_handler = logging.StreamHandler()
console_handler.setLevel(config['log']['level'].get('console', logging.INFO))
console_handler.setFormatter(formatter)
log.addHandler(console_handler)
# db_handler = DbHandler()
# db_handler.setLevel(config['log']['level'].get('db', logging.WARNING))
# db_handler.setFormatter(formatter)
# log.addHandler(db_handler)
if wrap_print:
sys.stdout = LoggerWrapper([log.debug, log.info, log.warning, log.error], 1)
sys.stderr = LoggerWrapper([log.debug, log.info, log.warning, log.error], 3)
log.error = partial(log.error, exc_info=True)
logger = log
def get_log(logger_name=None):
if logger_name is None:
return logging.getLogger('mindsdb')
return logging.getLogger(f'mindsdb.{logger_name}')
|
299,898 | content | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
#
# Code generated by aaz-dev-tools
# --------------------------------------------------------------------------------------------
# pylint: skip-file
# flake8: noqa
from azure.cli.core.aaz import *
@register_command(
"monitor action-group enable-receiver",
)
class EnableReceiver(AAZCommand):
"""Enable a receiver in an action group.
This changes the receiver's status from Disabled to Enabled. This operation is only supported for Email or SMS receivers.
"""
_aaz_info = {
"version": "2022-06-01",
"resources": [
["mgmt-plane", "/subscriptions/{}/resourcegroups/{}/providers/microsoft.insights/actiongroups/{}/subscribe", "2022-06-01"],
]
}
def _handler(self, command_args):
super()._handler(command_args)
self._execute_operations()
return None
_args_schema = None
@classmethod
def _build_arguments_schema(cls, *args, **kwargs):
if cls._args_schema is not None:
return cls._args_schema
cls._args_schema = super()._build_arguments_schema(*args, **kwargs)
# define Arg Group ""
_args_schema = cls._args_schema
_args_schema.action_group_name = AAZStrArg(
options=["--action-group", "--action-group-name"],
help="The name of the action group.",
required=True,
id_part="name",
)
_args_schema.resource_group = AAZResourceGroupNameArg(
help="Name of resource group. You can configure the default group using `az configure --defaults group=<name>`.",
required=True,
)
_args_schema.receiver_name = AAZStrArg(
options=["-n", "--name", "--receiver-name"],
help="The name of the receiver to resubscribe.",
required=True,
)
return cls._args_schema
def _execute_operations(self):
self.pre_operations()
self.ActionGroupsEnableReceiver(ctx=self.ctx)()
self.post_operations()
@register_callback
def pre_operations(self):
pass
@register_callback
def post_operations(self):
pass
class ActionGroupsEnableReceiver(AAZHttpOperation):
CLIENT_TYPE = "MgmtClient"
def __call__(self, *args, **kwargs):
request = self.make_request()
session = self.client.send_request(request=request, stream=False, **kwargs)
if session.http_response.status_code in [200]:
return self.on_200(session)
return self.on_error(session.http_response)
@property
def url(self):
return self.client.format_url(
"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/actionGroups/{actionGroupName}/subscribe",
**self.url_parameters
)
@property
def method(self):
return "POST"
@property
def error_format(self):
return "ODataV4Format"
@property
def url_parameters(self):
parameters = {
**self.serialize_url_param(
"actionGroupName", self.ctx.args.action_group_name,
required=True,
),
**self.serialize_url_param(
"resourceGroupName", self.ctx.args.resource_group,
required=True,
),
**self.serialize_url_param(
"subscriptionId", self.ctx.subscription_id,
required=True,
),
}
return parameters
@property
def query_parameters(self):
parameters = {
**self.serialize_query_param(
"api-version", "2022-06-01",
required=True,
),
}
return parameters
@property
def header_parameters(self):
parameters = {
**self.serialize_header_param(
"Content-Type", "application/json",
),
}
return parameters
@property
def METHOD_NAME(self):
_content_value, _builder = self.new_content_builder(
self.ctx.args,
typ=AAZObjectType,
typ_kwargs={"flags": {"required": True, "client_flatten": True}}
)
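            # The subscribe request carries a minimal JSON body of the form
            # {"receiverName": "<name>"}; set_prop maps the CLI argument onto it.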
_builder.set_prop("receiverName", AAZStrType, ".receiver_name", typ_kwargs={"flags": {"required": True}})
return self.serialize_content(_content_value)
def on_200(self, session):
pass
class _EnableReceiverHelper:
"""Helper class for EnableReceiver"""
__all__ = ["EnableReceiver"] |
299,899 | test capture | __copyright__ = "Copyright (C) 2014-2016 Martin Blais"
__license__ = "GNU GPLv2"
import unittest
import io
import os
from os import path
from beancount.utils import test_utils
class TestTestUtils(unittest.TestCase):
def test_tempdir(self):
with test_utils.tempdir() as tempdir:
with open(path.join(tempdir, 'file1'), 'w'): pass
os.mkdir(path.join(tempdir, 'directory'))
with open(path.join(tempdir, 'directory', 'file2'), 'w'): pass
self.assertFalse(path.exists(tempdir))
self.assertFalse(path.exists(path.join(tempdir, 'file1')))
self.assertFalse(path.exists(path.join(tempdir, 'directory')))
def test_create_temporary_files(self):
with test_utils.tempdir() as tmp:
test_utils.create_temporary_files(tmp, {
'apples.beancount': """
include "{root}/fruits/oranges.beancount"
2014-01-01 open Assets:Apples
""",
'fruits/oranges.beancount': """
2014-01-02 open Assets:Oranges
"""})
# Check the total list of files.
apples = path.join(tmp, 'apples.beancount')
oranges = path.join(tmp, 'fruits/oranges.beancount')
self.assertEqual({apples, oranges},
set(path.join(root, filename)
for root, _, files in os.walk(tmp)
for filename in files))
# Check the contents of apples (with replacement of root).
with open(apples) as f: apples_content = f.read()
self.assertRegex(apples_content, 'open Assets:Apples')
self.assertNotRegex(apples_content, '{root}')
# Check the contents of oranges.
with open(oranges) as f: oranges_content = f.read()
self.assertRegex(oranges_content, 'open Assets:Oranges')
def METHOD_NAME(self):
text = "b9baaa0c-0f0a-47db-bffc-a00c6f4ac1db"
with test_utils.capture() as output:
self.assertTrue(isinstance(output, io.StringIO))
print(text)
self.assertEqual(text + "\n", output.getvalue())
@test_utils.docfile
def test_docfile(self, filename):
"7f9034b1-51e7-420c-ac6b-945b5c594ebf"
with open(filename) as f: uuid = f.read()
self.assertEqual("7f9034b1-51e7-420c-ac6b-945b5c594ebf", uuid)
@test_utils.docfile_extra(suffix='.txt')
def test_docfile_extra(self, filename):
"7f9034b1-51e7-420c-ac6b-945b5c594ebf"
with open(filename) as f: uuid = f.read()
self.assertEqual("7f9034b1-51e7-420c-ac6b-945b5c594ebf", uuid)
self.assertTrue('.txt' in filename)
def test_search_words(self):
test_utils.search_words('i walrus is',
'i am the walrus is not chicago')
test_utils.search_words('i walrus is'.split(),
'i am the walrus is not chicago')
def test_environ_contextmanager(self):
with test_utils.environ('PATH', '/unlikely-to-be-your-path'):
self.assertEqual('/unlikely-to-be-your-path', os.getenv('PATH'))
self.assertNotEqual('/unlikely-to-be-your-path', os.getenv('PATH'))
class TestTestCase(test_utils.TestCase):
def test_assertLines(self):
self.assertLines("""
43c62bff-8504-44ea-b5c0-afa218a7a973
95ef1cc4-0016-4452-9f4e-1a053db2bc83
""", """
43c62bff-8504-44ea-b5c0-afa218a7a973
95ef1cc4-0016-4452-9f4e-1a053db2bc83
""")
with self.assertRaises(AssertionError):
self.assertLines("""
43c62bff-8504-44ea-b5c0-afa218a7a973
""", """
683f111f-f921-4db3-a3e8-daae344981e8
""")
def test_assertOutput(self):
with self.assertOutput("""
3165efbc-c775-4503-be13-06b7167697a9
"""):
print('3165efbc-c775-4503-be13-06b7167697a9')
with self.assertRaises(AssertionError):
with self.assertOutput("""
3165efbc-c775-4503-be13-06b7167697a9
"""):
print('78d58502a15e')
class TestSkipIfRaises(unittest.TestCase):
def test_decorator(self):
@test_utils.skipIfRaises(ValueError)
def decorator_no_skip():
pass
decorator_no_skip()
@test_utils.skipIfRaises(ValueError)
def decorator_skip():
raise ValueError
with self.assertRaises(unittest.SkipTest):
decorator_skip()
def test_decorator_many(self):
@test_utils.skipIfRaises(ValueError, IndexError)
def decorator_skip():
raise ValueError
with self.assertRaises(unittest.SkipTest):
decorator_skip()
def test_contextmanager(self):
with test_utils.skipIfRaises(ValueError):
pass
with self.assertRaises(unittest.SkipTest):
with test_utils.skipIfRaises(ValueError):
raise ValueError
@test_utils.nottest
def test_not_really():
assert False
if __name__ == '__main__':
unittest.main() |