# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack import *


class RCdcfluview(RPackage):
    """The 'U.S.' Centers for Disease Control ('CDC') maintains a portal
    <http://gis.cdc.gov/grasp/fluview/fluportaldashboard.html> for accessing
    state, regional and national influenza statistics as well as Mortality
    Surveillance Data. The web interface makes it difficult and time-consuming
    to select and retrieve influenza data. Tools are provided to access the
    data provided by the portal's underlying 'API'."""

    homepage = "https://cloud.r-project.org/package=cdcfluview"
    url = "https://cloud.r-project.org/src/contrib/cdcfluview_0.7.0.tar.gz"
    list_url = "https://cloud.r-project.org/src/contrib/Archive/cdcfluview"

    version('0.9.0', sha256='1b2064886858cbb1790ef808d88fbab75d3a9cf55e720638221a3377ff8dd244')
    version('0.7.0', sha256='8c8978d081f8472a6ed5ec54c4e6dd906f97ee28d0f88eef1514088f041ecc03')

    depends_on('r@3.2.0:', type=('build', 'run'))
    depends_on('r-httr', type=('build', 'run'))
    depends_on('r-dplyr', type=('build', 'run'))
    depends_on('r-jsonlite', type=('build', 'run'))
    depends_on('r-sf', type=('build', 'run'))
    depends_on('r-xml2', type=('build', 'run'))
    depends_on('r-purrr', type=('build', 'run'))
    depends_on('r-readr', type=('build', 'run'))
    depends_on('r-mmwrweek', type=('build', 'run'))
    depends_on('r-units@0.4-6:', type=('build', 'run'))
import importlib
import pkgutil

from pkg_resources import get_distribution, DistributionNotFound


def get_package_version():
    """Get package version

    Returns:
        str: Installed package version, or 0.0.0.dev if not fully installed
    """
    try:
        return get_distribution(__name__.split('.')[0]).version
    except DistributionNotFound:
        return '0.0.0.dev'


def get_file_extension(filepath):
    """Return full file extension from filepath"""
    filename = filepath.split('/')[-1]

    return filename[filename.index('.'):]


def get_full_qualname(cls):
    """Return fully qualified class name"""
    return cls.__module__ + '.' + cls.__name__


def get_recursive_subclasses(cls):
    """Return list of all subclasses for a class, including subclasses of direct subclasses"""
    return cls.__subclasses__() + [g for s in cls.__subclasses__() for g in get_recursive_subclasses(s)]


def import_submodules(package):
    """Return dict of imported module instances from beneath the given package"""

    if isinstance(package, str):
        package = importlib.import_module(package)

    results = {}

    if hasattr(package, '__path__'):
        for _, name, is_pkg in pkgutil.walk_packages(package.__path__):
            full_name = package.__name__ + '.' + name
            try:
                results[full_name] = importlib.import_module(full_name)

                if is_pkg:
                    results.update(import_submodules(full_name))
            except ImportError:
                # Ignore import failures for now; quickest fix to support contrib serializers as extras with just deps
                continue

    return results
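
# ---------------------------------------------------------------------------
# Illustrative usage sketch (added; not part of the original module). It uses
# only the helpers defined above plus the stdlib "json" package, which is a
# convenient target for import_submodules since it has submodules.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    class Base(object):
        pass

    class Child(Base):
        pass

    class Grandchild(Child):
        pass

    # Includes indirect subclasses, unlike Base.__subclasses__() alone.
    print(get_recursive_subclasses(Base))             # [Child, Grandchild]
    print(get_full_qualname(Grandchild))              # __main__.Grandchild
    print(get_file_extension('/tmp/archive.tar.gz'))  # .tar.gz

    # Maps dotted names to imported modules, e.g. 'json.decoder', 'json.tool'.
    print(sorted(import_submodules('json')))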
import os
import click


def register(app):
    @app.cli.group()
    def translate():
        """Translation and localization commands."""
        pass

    @translate.command()
    def update():
        """Update all languages."""
        if os.system('pybabel extract -F babel.cfg -k _l -o messages.pot .'):
            raise RuntimeError('extract command failed')
        if os.system('pybabel update -i messages.pot -d app/translations'):
            raise RuntimeError('update command failed')
        os.remove('messages.pot')

    @translate.command()
    def compile():
        """Compile all languages."""
        if os.system('pybabel compile -d app/translations'):
            raise RuntimeError('compile command failed')

    @translate.command()
    @click.argument('lang')
    def init(lang):
        """Initialize a new language."""
        if os.system('pybabel extract -F babel.cfg -k _l -o messages.pot .'):
            raise RuntimeError('extract command failed')
        if os.system(
                'pybabel init -i messages.pot -d app/translations -l ' + lang):
            raise RuntimeError('init command failed')
        os.remove('messages.pot')
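
# ---------------------------------------------------------------------------
# Wiring sketch (added; not part of the original module). The real project
# presumably calls register(app) on its Flask instance from an application
# factory; the bare Flask app below is a stand-in for that.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from flask import Flask

    app = Flask(__name__)
    register(app)

    # The group and its commands are then exposed through the Flask CLI:
    #   flask translate init <lang>   # extract strings, start a new catalog
    #   flask translate update        # refresh existing message catalogs
    #   flask translate compile       # compile catalogs for runtime use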
##############################################################################
#
# Copyright (c) 2001, 2002, 2009 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Examples supporting Sphinx doctest snippets.
"""
import sys

from zope.interface import Interface
from zope.interface import implementer
from zope.interface.interfaces import IInterface

from zope.component._declaration import adapter
from zope.component.testfiles.views import IC


def write(x):
    sys.stdout.write('%s\n' % x)


class ITestType(IInterface):
    pass


class I1(Interface):
    pass


class I2(Interface):
    pass


class I3(Interface):
    pass


class I4(Interface):
    pass


class IGI(Interface):
    pass


class IQI(Interface):
    pass


class ISI(Interface):
    pass


class ISII(Interface):
    pass


class U(object):

    def __init__(self, name):
        self.__name__ = name

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, self.__name__)


@implementer(I1)
class U1(U):
    pass


@implementer(I1, I2)
class U12(U):
    pass


@adapter(I1)
def handle1(x):
    write('handle1 %s' % x)


def handle2(*objects):
    write('handle2 ' + repr(objects))


@adapter(I1)
def handle3(x):
    write('handle3 %s' % x)


@adapter(I1)
def handle4(x):
    write('handle4 %s' % x)


class GlobalRegistry:
    pass


from zope.component.globalregistry import GlobalAdapterRegistry

base = GlobalAdapterRegistry(GlobalRegistry, 'adapters')
GlobalRegistry.adapters = base


def clear_base():
    base.__init__(GlobalRegistry, 'adapters')


@implementer(I1)
class Ob(object):
    def __repr__(self):
        return '<instance Ob>'


ob = Ob()


@implementer(I2)
class Ob2(object):
    def __repr__(self):
        return '<instance Ob2>'


@implementer(IC)
class Ob3(object):
    pass


@implementer(I2)
class Comp(object):
    def __init__(self, context):
        self.context = context


comp = Comp(1)


class ConformsToIComponentLookup(object):
    """Allow a dummy sitemanager to conform/adapt to `IComponentLookup`."""

    def __init__(self, sitemanager):
        self.sitemanager = sitemanager

    def __conform__(self, interface):
        """This method is specified by the adapter PEP to do the adaptation."""
        from zope.interface.interfaces import IComponentLookup
        if interface is IComponentLookup:
            return self.sitemanager


def clearZCML(test=None):
    from zope.configuration.xmlconfig import XMLConfig
    import zope.component
    from zope.component.testing import setUp
    from zope.component.testing import tearDown
    tearDown()
    setUp()
    XMLConfig('meta.zcml', zope.component)()
"""NEWLINE OpenVINO DL WorkbenchNEWLINE Dataset annotator moduleNEWLINENEWLINE Copyright (c) 2021 Intel CorporationNEWLINENEWLINE Licensed under the Apache License, Version 2.0 (the "License");NEWLINE you may not use this file except in compliance with the License.NEWLINE You may obtain a copy of the License atNEWLINE http://www.apache.org/licenses/LICENSE-2.0NEWLINE Unless required by applicable law or agreed to in writing, softwareNEWLINE distributed under the License is distributed on an "AS IS" BASIS,NEWLINE WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.NEWLINE See the License for the specific language governing permissions andNEWLINE limitations under the License.NEWLINE"""NEWLINEfrom wb.main.scripts.dataset_annotator.dataset_annotator import DatasetAnnotatorNEWLINEfrom wb.main.scripts.dataset_annotator.task_to_auto_annotated_dataset_type_mapper import \NEWLINE TaskToAutoAnnotatedDatasetTypeMapperNEWLINE
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Keras text category_encoding preprocessing layer."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function


from absl.testing import parameterized
import numpy as np

from tensorflow.python import keras

from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.keras import backend
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.layers.preprocessing import category_encoding
from tensorflow.python.keras.layers.preprocessing import category_encoding_v1
from tensorflow.python.keras.layers.preprocessing import preprocessing_test_utils
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.platform import test


def get_layer_class():
  if context.executing_eagerly():
    return category_encoding.CategoryEncoding
  else:
    return category_encoding_v1.CategoryEncoding


@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class CategoryEncodingInputTest(keras_parameterized.TestCase,
                                preprocessing_test_utils.PreprocessingLayerTest
                               ):

  def test_dense_input_sparse_output(self):
    input_array = constant_op.constant([[1, 2, 3], [3, 3, 0]])

    # The expected output should be (X for missing value):
    # [[X, 1, 1, 1]
    #  [1, X, X, 2]]
    expected_indices = [[0, 1], [0, 2], [0, 3], [1, 0], [1, 3]]
    expected_values = [1, 1, 1, 1, 2]
    max_tokens = 6

    input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
    layer = get_layer_class()(
        max_tokens=max_tokens, output_mode=category_encoding.COUNT, sparse=True)
    int_data = layer(input_data)

    model = keras.Model(inputs=input_data, outputs=int_data)
    sp_output_dataset = model.predict(input_array, steps=1)
    self.assertAllEqual(expected_values, sp_output_dataset.values)
    self.assertAllEqual(expected_indices, sp_output_dataset.indices)

    # Assert sparse output is same as dense output.
    layer = get_layer_class()(
        max_tokens=max_tokens,
        output_mode=category_encoding.COUNT,
        sparse=False)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array, steps=1)
    self.assertAllEqual(
        sparse_ops.sparse_tensor_to_dense(sp_output_dataset, default_value=0),
        output_dataset)

  def test_sparse_input(self):
    input_array = np.array([[1, 2, 3, 0], [0, 3, 1, 0]], dtype=np.int64)
    sparse_tensor_data = sparse_ops.from_dense(input_array)

    # pyformat: disable
    expected_output = [[0, 1, 1, 1, 0, 0],
                       [0, 1, 0, 1, 0, 0]]
    # pyformat: enable
    max_tokens = 6
    expected_output_shape = [None, max_tokens]

    input_data = keras.Input(shape=(None,), dtype=dtypes.int64, sparse=True)

    layer = get_layer_class()(
        max_tokens=max_tokens, output_mode=category_encoding.BINARY)
    int_data = layer(input_data)
    self.assertAllEqual(expected_output_shape, int_data.shape.as_list())

    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(sparse_tensor_data, steps=1)
    self.assertAllEqual(expected_output, output_dataset)

  def test_sparse_input_with_weights(self):
    input_array = np.array([[1, 2, 3, 4], [4, 3, 1, 4]], dtype=np.int64)
    weights_array = np.array([[.1, .2, .3, .4], [.2, .1, .4, .3]])
    sparse_tensor_data = sparse_ops.from_dense(input_array)
    sparse_weight_data = sparse_ops.from_dense(weights_array)

    # pyformat: disable
    expected_output = [[0, .1, .2, .3, .4, 0],
                       [0, .4, 0, .1, .5, 0]]
    # pyformat: enable
    max_tokens = 6
    expected_output_shape = [None, max_tokens]

    input_data = keras.Input(shape=(None,), dtype=dtypes.int64, sparse=True)
    weight_data = keras.Input(shape=(None,), dtype=dtypes.float32, sparse=True)

    layer = get_layer_class()(
        max_tokens=max_tokens, output_mode=category_encoding.COUNT)
    int_data = layer(input_data, count_weights=weight_data)
    self.assertAllEqual(expected_output_shape, int_data.shape.as_list())

    model = keras.Model(inputs=[input_data, weight_data], outputs=int_data)
    output_dataset = model.predict([sparse_tensor_data, sparse_weight_data],
                                   steps=1)
    self.assertAllClose(expected_output, output_dataset)

  def test_sparse_input_sparse_output(self):
    sp_inp = sparse_tensor.SparseTensor(
        indices=[[0, 0], [1, 1], [2, 0], [2, 1], [3, 1]],
        values=[0, 2, 1, 1, 0],
        dense_shape=[4, 2])
    input_data = keras.Input(shape=(None,), dtype=dtypes.int64, sparse=True)

    # The expected output should be (X for missing value):
    # [[1, X, X, X]
    #  [X, X, 1, X]
    #  [X, 2, X, X]
    #  [1, X, X, X]]
    expected_indices = [[0, 0], [1, 2], [2, 1], [3, 0]]
    expected_values = [1, 1, 2, 1]
    max_tokens = 6

    layer = get_layer_class()(
        max_tokens=max_tokens, output_mode=category_encoding.COUNT, sparse=True)
    int_data = layer(input_data)

    model = keras.Model(inputs=input_data, outputs=int_data)
    sp_output_dataset = model.predict(sp_inp, steps=1)
    self.assertAllEqual(expected_values, sp_output_dataset.values)
    self.assertAllEqual(expected_indices, sp_output_dataset.indices)

    # Assert sparse output is same as dense output.
    layer = get_layer_class()(
        max_tokens=max_tokens,
        output_mode=category_encoding.COUNT,
        sparse=False)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(sp_inp, steps=1)
    self.assertAllEqual(
        sparse_ops.sparse_tensor_to_dense(sp_output_dataset, default_value=0),
        output_dataset)

  def test_sparse_input_sparse_output_with_weights(self):
    indices = [[0, 0], [1, 1], [2, 0], [2, 1], [3, 1]]
    sp_inp = sparse_tensor.SparseTensor(
        indices=indices, values=[0, 2, 1, 1, 0], dense_shape=[4, 2])
    input_data = keras.Input(shape=(None,), dtype=dtypes.int64, sparse=True)
    sp_weight = sparse_tensor.SparseTensor(
        indices=indices, values=[.1, .2, .4, .3, .2], dense_shape=[4, 2])
    weight_data = keras.Input(shape=(None,), dtype=dtypes.float32, sparse=True)

    # The expected output should be (X for missing value):
    # [[.1,  X,  X, X]
    #  [ X,  X, .2, X]
    #  [ X, .7,  X, X]
    #  [.2,  X,  X, X]]
    expected_indices = [[0, 0], [1, 2], [2, 1], [3, 0]]
    expected_values = [.1, .2, .7, .2]
    max_tokens = 6

    layer = get_layer_class()(
        max_tokens=max_tokens, output_mode=category_encoding.COUNT, sparse=True)
    int_data = layer(input_data, count_weights=weight_data)

    model = keras.Model(inputs=[input_data, weight_data], outputs=int_data)
    sp_output_dataset = model.predict([sp_inp, sp_weight], steps=1)
    self.assertAllClose(expected_values, sp_output_dataset.values)
    self.assertAllEqual(expected_indices, sp_output_dataset.indices)

  def test_ragged_input(self):
    input_array = ragged_factory_ops.constant([[1, 2, 3], [3, 1]])

    # pyformat: disable
    expected_output = [[0, 1, 1, 1, 0, 0],
                       [0, 1, 0, 1, 0, 0]]
    # pyformat: enable
    max_tokens = 6
    expected_output_shape = [None, max_tokens]

    input_data = keras.Input(shape=(None,), dtype=dtypes.int32, ragged=True)

    layer = get_layer_class()(
        max_tokens=max_tokens, output_mode=category_encoding.BINARY)
    int_data = layer(input_data)

    self.assertAllEqual(expected_output_shape, int_data.shape.as_list())

    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array, steps=1)
    self.assertAllEqual(expected_output, output_dataset)

  def test_ragged_input_sparse_output(self):
    input_array = ragged_factory_ops.constant([[1, 2, 3], [3, 3]])

    # The expected output should be (X for missing value):
    # [[X, 1, 1, 1]
    #  [X, X, X, 2]]
    expected_indices = [[0, 1], [0, 2], [0, 3], [1, 3]]
    expected_values = [1, 1, 1, 2]
    max_tokens = 6

    input_data = keras.Input(shape=(None,), dtype=dtypes.int32, ragged=True)
    layer = get_layer_class()(
        max_tokens=max_tokens, output_mode=category_encoding.COUNT, sparse=True)
    int_data = layer(input_data)

    model = keras.Model(inputs=input_data, outputs=int_data)
    sp_output_dataset = model.predict(input_array, steps=1)
    self.assertAllEqual(expected_values, sp_output_dataset.values)
    self.assertAllEqual(expected_indices, sp_output_dataset.indices)

    # Assert sparse output is same as dense output.
    layer = get_layer_class()(
        max_tokens=max_tokens,
        output_mode=category_encoding.COUNT,
        sparse=False)
    int_data = layer(input_data)
    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array, steps=1)
    self.assertAllEqual(
        sparse_ops.sparse_tensor_to_dense(sp_output_dataset, default_value=0),
        output_dataset)

  def test_sparse_output_and_dense_layer(self):
    input_array = constant_op.constant([[1, 2, 3], [3, 3, 0]])

    max_tokens = 4

    input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
    encoding_layer = get_layer_class()(
        max_tokens=max_tokens, output_mode=category_encoding.COUNT,
        sparse=True)
    int_data = encoding_layer(input_data)
    dense_layer = keras.layers.Dense(units=1)
    output_data = dense_layer(int_data)

    model = keras.Model(inputs=input_data, outputs=output_data)
    _ = model.predict(input_array, steps=1)

  def test_dense_oov_input(self):
    input_array = constant_op.constant([[1, 2, 3], [4, 3, 4]])
    max_tokens = 3
    expected_output_shape = [None, max_tokens]
    encoder_layer = get_layer_class()(max_tokens)
    input_data = keras.Input(shape=(3,), dtype=dtypes.int32)
    int_data = encoder_layer(input_data)
    self.assertAllEqual(expected_output_shape, int_data.shape.as_list())
    model = keras.Model(inputs=input_data, outputs=int_data)
    with self.assertRaisesRegex(errors.InvalidArgumentError,
                                ".*must be less than max_token 3"):
      _ = model.predict(input_array, steps=1)


@keras_parameterized.run_all_keras_modes
class CategoryEncodingAdaptTest(keras_parameterized.TestCase,
                                preprocessing_test_utils.PreprocessingLayerTest
                               ):

  def test_sparse_adapt(self):
    vocab_data = sparse_ops.from_dense(
        np.array([[1, 1, 0, 1, 1, 2, 2, 0, 2, 3, 3, 0, 4]], dtype=np.int64))
    vocab_dataset = dataset_ops.Dataset.from_tensors(vocab_data)
    input_array = sparse_ops.from_dense(
        np.array([[1, 2, 3, 0], [0, 3, 1, 0]], dtype=np.int64))

    # pyformat: disable
    expected_output = [[0, 1, 1, 1, 0],
                       [0, 1, 0, 1, 0]]
    # pyformat: enable
    max_tokens = 5
    expected_output_shape = [None, max_tokens]

    input_data = keras.Input(shape=(None,), dtype=dtypes.int64, sparse=True)
    layer = get_layer_class()(
        max_tokens=None, output_mode=category_encoding.BINARY)
    layer.adapt(vocab_dataset)
    int_data = layer(input_data)
    self.assertAllEqual(expected_output_shape, int_data.shape.as_list())

    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array, steps=1)
    self.assertAllEqual(expected_output, output_dataset)

  def test_ragged_adapt(self):
    vocab_data = ragged_factory_ops.constant(
        np.array([[1, 1, 0, 1, 1], [2, 2], [0, 2, 3], [0, 4]]))
    vocab_dataset = dataset_ops.Dataset.from_tensors(vocab_data)
    input_array = ragged_factory_ops.constant([[1, 2, 3], [3, 1]])

    # pyformat: disable
    expected_output = [[0, 1, 1, 1, 0],
                       [0, 1, 0, 1, 0]]
    # pyformat: enable
    max_tokens = 5
    expected_output_shape = [None, max_tokens]

    input_data = keras.Input(shape=(None,), dtype=dtypes.int32, ragged=True)

    layer = get_layer_class()(
        max_tokens=None, output_mode=category_encoding.BINARY)
    layer.adapt(vocab_dataset)
    int_data = layer(input_data)

    self.assertAllEqual(expected_output_shape, int_data.shape.as_list())

    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array, steps=1)
    self.assertAllEqual(expected_output, output_dataset)

  def test_hard_maximum_set_state_variables_after_build(self):
    state_variables = {category_encoding._NUM_ELEMENTS_NAME: 5}
    input_array = np.array([[1, 2, 3, 1], [0, 3, 1, 0]])

    # pyformat: disable
    expected_output = [[0, 1, 1, 1, 0],
                       [1, 1, 0, 1, 0]]
    # pyformat: enable
    max_tokens = 5
    expected_output_shape = [None, max_tokens]

    input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
    layer = get_layer_class()(
        max_tokens=max_tokens, output_mode=category_encoding.BINARY)
    int_data = layer(input_data)
    layer._set_state_variables(state_variables)
    self.assertAllEqual(expected_output_shape, int_data.shape.as_list())

    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_soft_maximum_set_state_after_build(self):
    input_array = np.array([[1, 2, 3, 1], [0, 3, 1, 0]])

    # pyformat: disable
    expected_output = [[0, 1, 1, 1, 0],
                       [1, 1, 0, 1, 0]]
    # pyformat: enable
    max_tokens = 5
    expected_output_shape = [None, max_tokens]

    input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
    layer = get_layer_class()(
        max_tokens=None, output_mode=category_encoding.BINARY)
    layer.build(input_data.shape)
    layer.set_num_elements(max_tokens)
    int_data = layer(input_data)
    self.assertAllEqual(expected_output_shape, int_data.shape.as_list())

    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_set_weights_fails_on_wrong_size_weights(self):
    tfidf_data = [.05, .5, .25, .2, .125]
    layer = get_layer_class()(max_tokens=6, output_mode=category_encoding.TFIDF)

    with self.assertRaisesRegex(ValueError, ".*Layer weight shape.*"):
      layer.set_weights([np.array(tfidf_data)])

  def test_set_num_elements_after_call_fails(self):
    input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
    layer = get_layer_class()(
        max_tokens=None, output_mode=category_encoding.BINARY)
    layer.adapt([1, 2])
    _ = layer(input_data)
    with self.assertRaisesRegex(
        RuntimeError, ".*'max_tokens' arg must be set to None."):
      layer.set_num_elements(5)

  def test_set_state_variables_after_call_fails(self):
    state_variables = {category_encoding._NUM_ELEMENTS_NAME: 5}

    input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
    layer = get_layer_class()(
        max_tokens=None, output_mode=category_encoding.BINARY)
    layer.adapt([1, 2])
    _ = layer(input_data)
    with self.assertRaisesRegex(RuntimeError, "Cannot update states.*"):
      layer._set_state_variables(state_variables)


@keras_parameterized.run_all_keras_modes
class CategoryEncodingOutputTest(keras_parameterized.TestCase,
                                 preprocessing_test_utils.PreprocessingLayerTest
                                ):

  def test_binary_output_hard_maximum(self):
    input_array = np.array([[1, 2, 3, 1], [0, 3, 1, 0]])

    # pyformat: disable
    expected_output = [[0, 1, 1, 1, 0, 0],
                       [1, 1, 0, 1, 0, 0]]
    # pyformat: enable
    max_tokens = 6
    expected_output_shape = [None, max_tokens]

    input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
    layer = get_layer_class()(
        max_tokens=max_tokens, output_mode=category_encoding.BINARY)
    int_data = layer(input_data)
    self.assertAllEqual(expected_output_shape, int_data.shape.as_list())

    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_binary_output_soft_maximum(self):
    input_array = np.array([[1, 2, 3, 1], [0, 3, 1, 0]])

    # pyformat: disable
    expected_output = [[0, 1, 1, 1, 0],
                       [1, 1, 0, 1, 0]]
    # pyformat: enable
    max_tokens = 5
    expected_output_shape = [None, max_tokens]

    input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
    layer = get_layer_class()(
        max_tokens=None, output_mode=category_encoding.BINARY)
    layer.set_num_elements(max_tokens)
    int_data = layer(input_data)
    self.assertAllEqual(expected_output_shape, int_data.shape.as_list())

    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_count_output_hard_maximum(self):
    input_array = np.array([[1, 2, 3, 1], [0, 3, 1, 0]])

    # pyformat: disable
    expected_output = [[0, 2, 1, 1, 0, 0],
                       [2, 1, 0, 1, 0, 0]]
    # pyformat: enable
    max_tokens = 6
    expected_output_shape = [None, max_tokens]

    input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
    layer = get_layer_class()(max_tokens=6, output_mode=category_encoding.COUNT)
    int_data = layer(input_data)
    self.assertAllEqual(expected_output_shape, int_data.shape.as_list())

    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_count_output_soft_maximum(self):
    input_array = np.array([[1, 2, 3, 1], [0, 3, 1, 0]])

    # pyformat: disable
    expected_output = [[0, 2, 1, 1, 0],
                       [2, 1, 0, 1, 0]]
    # pyformat: enable
    max_tokens = 5
    expected_output_shape = [None, max_tokens]

    input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
    layer = get_layer_class()(
        max_tokens=None, output_mode=category_encoding.COUNT)
    layer.set_num_elements(max_tokens)
    int_data = layer(input_data)
    self.assertAllEqual(expected_output_shape, int_data.shape.as_list())

    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllEqual(expected_output, output_dataset)

  def test_tfidf_output_hard_maximum(self):
    tfidf_data = [.05, .5, .25, .2, .125]
    input_array = np.array([[1, 2, 3, 1], [0, 4, 1, 0]])

    # pyformat: disable
    # pylint: disable=bad-whitespace
    expected_output = [[ 0, 1, .25, .2,    0, 0],
                       [.1, .5,  0,  0, .125, 0]]
    # pylint: enable=bad-whitespace
    # pyformat: enable
    max_tokens = 6
    expected_output_shape = [None, max_tokens]

    input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
    layer = get_layer_class()(max_tokens=6, output_mode=category_encoding.TFIDF)
    layer.set_tfidf_data(tfidf_data)
    int_data = layer(input_data)
    self.assertAllEqual(expected_output_shape, int_data.shape.as_list())

    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllClose(expected_output, output_dataset)

  def test_tfidf_output_soft_maximum(self):
    tfidf_data = [.05, .5, .25, .2, .125]
    input_array = np.array([[1, 2, 3, 1], [0, 4, 1, 0]])

    # pyformat: disable
    # pylint: disable=bad-whitespace
    expected_output = [[ 0, 1, .25, .2,    0],
                       [.1, .5,  0,  0, .125]]
    # pylint: enable=bad-whitespace
    # pyformat: enable
    max_tokens = 5
    expected_output_shape = [None, max_tokens]

    input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
    layer = get_layer_class()(
        max_tokens=None, output_mode=category_encoding.TFIDF)
    layer.set_num_elements(max_tokens)
    layer.set_tfidf_data(tfidf_data)
    int_data = layer(input_data)
    self.assertAllEqual(expected_output_shape, int_data.shape.as_list())

    model = keras.Model(inputs=input_data, outputs=int_data)
    output_dataset = model.predict(input_array)
    self.assertAllClose(expected_output, output_dataset)


class CategoryEncodingModelBuildingTest(
    keras_parameterized.TestCase,
    preprocessing_test_utils.PreprocessingLayerTest):

  @parameterized.named_parameters(
      {
          "testcase_name": "count_hard_max",
          "max_tokens": 5,
          "output_mode": category_encoding.COUNT
      }, {
          "testcase_name": "count_soft_max",
          "max_tokens": None,
          "output_mode": category_encoding.COUNT
      }, {
          "testcase_name": "binary_hard_max",
          "max_tokens": 5,
          "output_mode": category_encoding.BINARY
      }, {
          "testcase_name": "binary_soft_max",
          "max_tokens": None,
          "output_mode": category_encoding.BINARY
      }, {
          "testcase_name": "tfidf_hard_max",
          "max_tokens": 5,
          "output_mode": category_encoding.TFIDF
      }, {
          "testcase_name": "tfidf_soft_max",
          "max_tokens": None,
          "output_mode": category_encoding.TFIDF
      })
  def test_end_to_end_bagged_modeling(self, output_mode, max_tokens):
    tfidf_data = np.array([.03, .5, .25, .2, .125])
    input_array = np.array([[1, 2, 3, 1], [0, 3, 1, 0]])

    input_data = keras.Input(shape=(None,), dtype=dtypes.int32)
    layer = get_layer_class()(max_tokens=max_tokens, output_mode=output_mode)

    weights = []
    if max_tokens is None:
      layer.set_num_elements(5)
    if output_mode == category_encoding.TFIDF:
      weights.append(tfidf_data)

    layer.set_weights(weights)

    int_data = layer(input_data)
    float_data = backend.cast(int_data, dtype="float32")
    output_data = core.Dense(64)(float_data)
    model = keras.Model(inputs=input_data, outputs=output_data)
    _ = model.predict(input_array)


@keras_parameterized.run_all_keras_modes
class CategoryEncodingCombinerTest(
    keras_parameterized.TestCase,
    preprocessing_test_utils.PreprocessingLayerTest):

  def compare_idf_accumulators(self, a, b, msg=None):
    if a is None or b is None:
      self.assertAllEqual(a, b, msg=msg)
      return

    self.assertAllEqual(a.data, b.data, msg=msg)

    if a.per_doc_count_dict is not None:

      def per_doc_counts(accumulator):
        count_values = [
            count_dict["count"]
            for count_dict in accumulator.per_doc_count_dict.values()
        ]
        return dict(zip(accumulator.per_doc_count_dict.keys(), count_values))

      self.assertAllEqual(per_doc_counts(a), per_doc_counts(b), msg=msg)

  compare_accumulators = compare_idf_accumulators

  def update_accumulator(self, accumulator, data):
    accumulator.data[1] = data["num_documents"]
    accumulator.data[0] = data["max_element"]

    if "document_counts" in data:
      create_dict = lambda x: {"count": x, "last_doc_id": -1}
      idf_dict = {}
      for i, count in enumerate(data["document_counts"]):
        if count > 0:
          idf_dict[i] = create_dict(count)

      accumulator.per_doc_count_dict.update(idf_dict)

    return accumulator

  def test_combiner_api_compatibility_int_mode(self):
    data = np.array([[1, 2, 3, 4], [1, 2, 3, 0]])
    combiner = category_encoding._CategoryEncodingCombiner(compute_idf=False)
    expected_accumulator_output = {
        "max_element": np.array(4),
        "num_documents": np.array(2),
    }
    expected_extract_output = {
        "num_elements": np.array(5),
    }
    expected_accumulator = combiner._create_accumulator()
    expected_accumulator = self.update_accumulator(expected_accumulator,
                                                   expected_accumulator_output)
    self.validate_accumulator_serialize_and_deserialize(combiner, data,
                                                        expected_accumulator)
    self.validate_accumulator_uniqueness(combiner, data)
    self.validate_accumulator_extract(combiner, data, expected_extract_output)

  def test_combiner_api_compatibility_tfidf_mode(self):
    data = np.array([[1, 2, 3, 4], [1, 2, 3, 0]])
    combiner = category_encoding._CategoryEncodingCombiner(compute_idf=True)
    expected_accumulator_output = {
        "max_element": np.array(4),
        "document_counts": np.array([1, 2, 2, 2, 1]),
        "num_documents": np.array(2),
    }
    expected_extract_output = {
        "num_elements": np.array(5),
        "idf": np.array([0.693147, 0.510826, 0.510826, 0.510826, 0.693147]),
    }

    expected_accumulator = combiner._create_accumulator()
    expected_accumulator = self.update_accumulator(expected_accumulator,
                                                   expected_accumulator_output)
    self.validate_accumulator_serialize_and_deserialize(combiner, data,
                                                        expected_accumulator)
    self.validate_accumulator_uniqueness(combiner, data)
    self.validate_accumulator_extract(combiner, data, expected_extract_output)

  # TODO(askerryryan): Add tests confirming equivalence to behavior of
  # existing tf.keras.preprocessing.text.Tokenizer.
  @parameterized.named_parameters(
      {
          "testcase_name": "no_top_k",
          "data": np.array([[1, 2], [4, 2], [3], [4, 2]]),
          "expected_accumulator_output": {
              "max_element": np.array(4),
              "document_counts": np.array([0, 1, 3, 1, 2]),
              "num_documents": np.array(4),
          },
          "expected_extract_output": {
              "num_elements":
                  np.array(5),
              "idf":
                  np.array([1.609438, 1.098612, 0.693147, 1.098612, 0.847298]),
          },
      }, {
          "testcase_name": "single_element_per_row",
          "data": np.array([[1], [2], [4], [2], [3]]),
          "expected_accumulator_output": {
              "max_element": np.array(4),
              "document_counts": np.array([0, 1, 2, 1, 1]),
              "num_documents": np.array(5),
          },
          "expected_extract_output": {
              "num_elements":
                  np.array(5),
              "idf":
                  np.array([1.791759, 1.252763, 0.980829, 1.252763, 1.252763]),
          },
      })
  def test_combiner_computation(self,
                                data,
                                expected_accumulator_output,
                                expected_extract_output,
                                compute_idf=True):
    combiner = category_encoding._CategoryEncodingCombiner(
        compute_idf=compute_idf)
    expected_accumulator = combiner._create_accumulator()
    expected_accumulator = self.update_accumulator(expected_accumulator,
                                                   expected_accumulator_output)
    self.validate_accumulator_computation(combiner, data, expected_accumulator)
    self.validate_accumulator_extract(combiner, data, expected_extract_output)

  def test_1d_data(self):
    data = [1, 2, 3]
    cls = get_layer_class()
    layer = cls()
    layer.adapt(data)
    output = layer(data)
    self.assertListEqual(output.shape.as_list(), [3, 4])

  def test_no_adapt_exception(self):
    cls = get_layer_class()
    layer = cls()
    with self.assertRaisesRegex(
        RuntimeError, r".*you need to call.*"):
      _ = layer([1, 2, 3])

  def test_saving_loading(self):
    encoder = category_encoding.CategoryEncoding()
    encoder.adapt([1, 2, 3])
    model = keras.Sequential([encoder])
    model.save("/tmp/model", save_format="tf")
    loaded_model = keras.models.load_model("/tmp/model")
    self.assertAllClose(model.predict([[1]]), loaded_model.predict([[1]]))

  def test_serialize(self):
    encoder = category_encoding.CategoryEncoding()
    encoder.adapt([1, 2, 3])
    model = keras.Sequential([encoder])
    _ = keras.models.clone_model(model)


if __name__ == "__main__":
  test.main()
#!/usr/bin/env python

# Copyright (c) 2013 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=F0401

import os
import shutil
import sys
from common_function import RemoveUnusedFilesInReleaseMode


def Clean(dir_to_clean):
  if os.path.isdir(dir_to_clean):
    shutil.rmtree(dir_to_clean)


def PrepareFromChromium(target_dir):
  gyp_dir = os.path.join(target_dir, 'scripts', 'gyp')
  if not os.path.exists(gyp_dir):
    os.makedirs(gyp_dir)
  shutil.copytree('../build/android/gyp/util', os.path.join(gyp_dir, 'util'))
  shutil.copy('../build/android/gyp/ant.py', gyp_dir)


def PrepareFromXwalk(src_dir, target_dir):
  '''Prepare different files for app packaging tools. All resources are used
  by make_apk.py.
  '''
  # Get the dir of source code from src_dir: ../../.
  source_code_dir = os.path.dirname(os.path.dirname(src_dir))

  # The directories for source and target .jar files.
  jar_src_dir = os.path.join(src_dir, 'lib.java')
  jar_target_dir = os.path.join(target_dir, 'libs')

  # The directories for generated resources.
  gen_res_src_dir = os.path.join(src_dir, 'gen')
  gen_res_target_dir = os.path.join(target_dir, 'gen')

  # The directory for source packaging tools.
  tools_src_dir = os.path.join(source_code_dir, 'xwalk/app/tools/android')

  # The directories for source and target gyp.
  gyp_src_dir = os.path.join(tools_src_dir, 'gyp')
  gyp_target_dir = os.path.join(target_dir, 'scripts/gyp')

  # The source file/directory list to be copied and the target directory list.
  source_target_list = [
      (os.path.join(source_code_dir, 'xwalk/VERSION'), target_dir),

      # This jar is needed for 'javac' compile.
      (os.path.join(jar_src_dir, 'xwalk_app_runtime_java.jar'), jar_target_dir),
      (os.path.join(jar_src_dir, 'xwalk_core_embedded.dex.jar'), jar_target_dir),

      # Native library, like libxwalkcore.so.
      (os.path.join(src_dir, 'xwalk_runtime_lib_apk/libs/x86'),
       os.path.join(target_dir, 'native_libs/x86/libs/x86')),
      (os.path.join(src_dir, 'xwalk_runtime_lib_apk/libs/armeabi-v7a'),
       os.path.join(target_dir, 'native_libs/armeabi-v7a/libs/armeabi-v7a')),

      # Native source package(xwalk.pak) and related js files for extension.
      (os.path.join(src_dir, 'xwalk_runtime_lib/assets'),
       os.path.join(target_dir, 'native_libs_res')),

      # Various Java resources.
      (os.path.join(source_code_dir, 'content/public/android/java/res'),
       os.path.join(target_dir, 'libs_res/content')),
      (os.path.join(source_code_dir, 'ui/android/java/res'),
       os.path.join(target_dir, 'libs_res/ui')),
      (os.path.join(source_code_dir, 'xwalk/runtime/android/java/res'),
       os.path.join(target_dir, 'libs_res/runtime')),

      (os.path.join(gen_res_src_dir, 'ui_java/java_R'),
       os.path.join(gen_res_target_dir, 'ui_java/java_R')),
      (os.path.join(gen_res_src_dir, 'ui_java/res_crunched'),
       os.path.join(gen_res_target_dir, 'ui_java/res_crunched')),
      (os.path.join(gen_res_src_dir, 'ui_java/res_grit'),
       os.path.join(gen_res_target_dir, 'ui_java/res_grit')),
      (os.path.join(gen_res_src_dir, 'ui_java/res_v14_compatibility'),
       os.path.join(gen_res_target_dir, 'ui_java/res_v14_compatibility')),

      (os.path.join(gen_res_src_dir, 'content_java/java_R'),
       os.path.join(gen_res_target_dir, 'content_java/java_R')),
      (os.path.join(gen_res_src_dir, 'content_java/res_crunched'),
       os.path.join(gen_res_target_dir, 'content_java/res_crunched')),
      (os.path.join(gen_res_src_dir, 'content_java/res_grit'),
       os.path.join(gen_res_target_dir, 'content_java/res_grit')),
      (os.path.join(gen_res_src_dir, 'content_java/res_v14_compatibility'),
       os.path.join(gen_res_target_dir, 'content_java/res_v14_compatibility')),

      (os.path.join(gen_res_src_dir, 'xwalk_core_java/java_R'),
       os.path.join(gen_res_target_dir, 'xwalk_core_java/java_R')),
      (os.path.join(gen_res_src_dir, 'xwalk_core_java/res_crunched'),
       os.path.join(gen_res_target_dir, 'xwalk_core_java/res_crunched')),
      (os.path.join(gen_res_src_dir, 'xwalk_core_java/res_grit'),
       os.path.join(gen_res_target_dir, 'xwalk_core_java/res_grit')),
      (os.path.join(gen_res_src_dir, 'xwalk_core_java/res_v14_compatibility'),
       os.path.join(gen_res_target_dir, 'xwalk_core_java/res_v14_compatibility')),

      # The app wrapper code. It's the template Java code.
      (os.path.join(source_code_dir, 'xwalk/app/android/app_template'),
       os.path.join(target_dir, 'app_src')),

      # Copy below 5 files to overwrite the existing ones from Chromium.
      (os.path.join(gyp_src_dir, 'util/build_utils.py'),
       os.path.join(gyp_target_dir, 'util')),
      (os.path.join(gyp_src_dir, 'dex.py'), gyp_target_dir),
      (os.path.join(gyp_src_dir, 'finalize_apk.py'), gyp_target_dir),
      (os.path.join(gyp_src_dir, 'jar.py'), gyp_target_dir),
      (os.path.join(gyp_src_dir, 'javac.py'), gyp_target_dir),

      # Build and python tools.
      (os.path.join(tools_src_dir, 'ant'),
       os.path.join(target_dir, 'scripts/ant')),
      (os.path.join(tools_src_dir, 'customize.py'), target_dir),
      (os.path.join(tools_src_dir, 'handle_permissions.py'), target_dir),
      (os.path.join(tools_src_dir, 'handle_xml.py'), target_dir),
      (os.path.join(tools_src_dir, 'make_apk.py'), target_dir),
      (os.path.join(tools_src_dir, 'manifest_json_parser.py'), target_dir),
      (os.path.join(tools_src_dir, 'parse_xpk.py'), target_dir)
  ]

  for index in range(len(source_target_list)):
    source_path, target_path = source_target_list[index]

    # Process source.
    if not os.path.exists(source_path):
      print ('The source path "%s" does not exist.' % source_path)
      continue

    source_is_file = os.path.isfile(source_path)

    # Process target.
    if source_is_file and not os.path.exists(target_path):
      os.makedirs(target_path)

    # Do copy.
    if source_is_file:
      shutil.copy(source_path, target_path)
    else:
      shutil.copytree(source_path, target_path)

  # Remove unused files.
  mode = os.path.basename(os.path.dirname(target_dir))
  RemoveUnusedFilesInReleaseMode(mode, os.path.join(target_dir, 'native_libs'))


def main(args):
  if len(args) != 1:
    print 'You must provide only one argument: folder to update'
    return 1
  target_dir = args[0]
  src_dir = os.path.dirname(target_dir)
  Clean(target_dir)
  PrepareFromChromium(target_dir)
  PrepareFromXwalk(src_dir, target_dir)


if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
import bpy
from numpy import array, zeros, argmin, inf
from numpy.linalg import norm
from mathutils import *

####==========================================================================
# DTW implementation courtesy of Pierre Rouanet:
# http://github.com/pierre-rouanet/dtw
# See examples that he posts
####==========================================================================

def dtw(x, y, dist=lambda x, y: norm(x - y, ord=1)):
    """ Computes the DTW of two sequences.

    :param array x: N1*M array
    :param array y: N2*M array
    :param func dist: distance used as cost measure (default L1 norm)

    Returns the minimum distance, the accumulated cost matrix and the warp path.

    """
    x = array(x)
    if len(x.shape) == 1:
        x = x.reshape(-1, 1)
    y = array(y)
    if len(y.shape) == 1:
        y = y.reshape(-1, 1)

    r, c = len(x), len(y)

    D = zeros((r + 1, c + 1))
    D[0, 1:] = inf
    D[1:, 0] = inf

    for i in range(r):
        for j in range(c):
            D[i+1, j+1] = dist(x[i], y[j])

    for i in range(r):
        for j in range(c):
            D[i+1, j+1] += min(D[i, j], D[i, j+1], D[i+1, j])

    D = D[1:, 1:]

    dist = D[-1, -1] / sum(D.shape)

    return dist, D, _traceback(D)


def _traceback(D):
    i, j = array(D.shape) - 1
    p, q = [i], [j]
    while (i > 0 and j > 0):
        tb = argmin((D[i-1, j-1], D[i-1, j], D[i, j-1]))

        if (tb == 0):
            i = i - 1
            j = j - 1
        elif (tb == 1):
            i = i - 1
        elif (tb == 2):
            j = j - 1

        p.insert(0, i)
        q.insert(0, j)

    p.insert(0, 0)
    q.insert(0, 0)
    return (array(p), array(q))

####==========================================================================

####==========================================================================
# Get the rotation given mocap to search in, bonename, and frame
# "action" is a blender Action, "bonename" is a string, "frame" is an int
# blender Actions can be found in bpy.data.actions
####==========================================================================
def get_rotation(action, bonename, frame=1):
    rot = Euler([0, 0, 0])
    data_path = 'pose.bones["%s"].rotation_euler' % (bonename)
    for fc in action.fcurves:
        if fc.data_path == data_path:
            rot[fc.array_index] = fc.evaluate(frame)
    return rot

####==========================================================================
# Creates a list containing the rotations of a bone
# rot[i] equals the i+1th frame of animation
# "action" is a blender Action, "bonename" is a string
####==========================================================================
def listRotation(action, bonename):
    rot = []
    for i in range(1, int(action.fcurves[0].range()[1]) + 1):
        rot.append(get_rotation(action, bonename, i))
    return rot

####==========================================================================
# Replaces the existing rotation FCurves with the new computed rotations
# "action" is a blender Action, "bonename" is a string and
# "rot" is a list of vectors
####==========================================================================
def replaceRotation(action, bonename, rot):
    data_path = 'pose.bones["%s"].rotation_euler' % (bonename)
    x = []
    y = []
    z = []
    # Separate x, y, z values
    for i in range(len(rot)):
        x.append(rot[i][0])
        y.append(rot[i][1])
        z.append(rot[i][2])

    # Obtain curves of interest
    for curve in action.fcurves:
        if curve.data_path == data_path:
            if curve.array_index == 0:
                c0 = curve
            elif curve.array_index == 1:
                c1 = curve
            elif curve.array_index == 2:
                c2 = curve

    # Access keyframes
    c0k = c0.keyframe_points
    c1k = c1.keyframe_points
    c2k = c2.keyframe_points

    # Replace existing keyframes with new ones
    for i in range(1, len(x) + 1):
        c0k.insert(i, x[i-1], {'REPLACE'})
        c1k.insert(i, y[i-1], {'REPLACE'})
        c2k.insert(i, z[i-1], {'REPLACE'})

####==========================================================================
# Creates the final curve based on determined path
# Based on current understanding of the function
# "curve" is a list of vectors and "path" is a list of ints
####==========================================================================
def match(curve, path):
    t = []
    for i in path:
        t.append(curve[i])
    return t

####==========================================================================
# Run DTW alg to find shortest path
# Primarily interested in Path for the time being
# Uses it to generate the new rotations
# "curveA", "curveB" are a list of vectors
####==========================================================================
def applyDTW(curveA, curveB):
    dist, cost, path = dtw(curveA, curveB)
    curveA = match(curveA, path[0])
    curveB = match(curveB, path[1])
    return curveA, curveB

####==========================================================================
# Example Usage
####==========================================================================
if __name__ == "__main__":
    action1 = bpy.data.actions[0]  # Mocap 1
    action2 = bpy.data.actions[1]  # Mocap 2

    # Comparison joints
    bodyBone = 'lHand'
    handBone = 'Hand'

    # Get the rotation data (vectors)
    rotA = listRotation(action1, bodyBone)
    rotB = listRotation(action2, handBone)

    # Process rotA and rotB
    rotA, rotB = applyDTW(rotA, rotB)

    # Replace originals
    replaceRotation(action1, bodyBone, rotA)
    replaceRotation(action2, handBone, rotB)
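
####==========================================================================
# Quick numeric check of dtw() (added for illustration; not in the original
# file). Nothing below needs Blender, so it can be called from any Python
# session where dtw() above is available without the bpy/mathutils imports.
####==========================================================================
def _demo_dtw():
    # Two similar 1-D sequences; y lingers on some values longer than x does.
    d, cost_matrix, (path_x, path_y) = dtw([0, 1, 2, 3], [0, 0, 1, 2, 2, 3])
    print(d)        # 0.0: every x value can be matched to an equal y value
    print(path_x)   # indices into x along the optimal warp path
    print(path_y)   # matching indices into y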
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# **************************************************************************
# Copyright © 2016 jianglin
# File Name: __init__.py
# Author: jianglin
# Email: xiyang0807@gmail.com
# Created: 2016-11-25 17:45:36 (CST)
# Last Update: Friday 2016-11-25 17:45:36 (CST)
# By:
# Description:
# **************************************************************************
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# vim: set et sw=4 ts=4:

from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *

from ayat import (suar_names, suar_lengths)


class SuraAyatDialog(QDialog):

    submit = pyqtSignal(int, int, int, bool, bool)

    def __init__(s):
        super().__init__()
        # Window title: "Recitation of verses of the Holy Quran"
        s.setWindowTitle("تسميع آيات القرآن الكريم")
        #
        s.setLayoutDirection(Qt.RightToLeft)
        #
        s.suraEntry = QComboBox()
        s.fromEntry = QSpinBox()
        s.toEntry = QSpinBox()
        s.darkEntry = QCheckBox("الوضع الليلي")  # "night mode"
        s.darkEntry.setStyleSheet("text-align: right")
        s.numberEntry = QCheckBox("ترقيم الآيات")  # "verse numbering"
        s.numberEntry.setStyleSheet("text-align: right")
        #
        for (i, n) in enumerate(suar_names):
            s.suraEntry.addItem("%s - %d" % (n, i+1))
        #
        def suraChanged(i):
            s.fromEntry.setMaximum(suar_lengths[i])
            s.toEntry.setMaximum(suar_lengths[i])
            s.toEntry.setValue(suar_lengths[i])
        #
        s.fromEntry.setMinimum(1)
        s.toEntry.setMinimum(1)
        suraChanged(0)
        #
        s.suraEntry.setEditable(True)
        s.suraEntry.lineEdit().selectAll()  # to just type the first characters of a sura's name
        s.suraEntry.setInsertPolicy(QComboBox.NoInsert)
        s.suraEntry.currentIndexChanged.connect(suraChanged)
        #
        s.form = QFormLayout()
        for (name, entry) in (
                ("السورة:", s.suraEntry),   # "Sura:"
                ("من آية:", s.fromEntry),   # "From verse:"
                ("إلى آية:", s.toEntry),    # "To verse:"
        ):
            s.form.addRow(name, entry)
        #
        s.okbtn = QPushButton("انطلق")  # "Go"
        s.okbtn.setDefault(True)

        def ok():
            s.submit.emit(
                s.suraEntry.currentIndex(),
                s.fromEntry.value()-1,
                s.toEntry.value(),
                s.darkEntry.isChecked(),
                s.numberEntry.isChecked(),
            )
            s.accept()
        s.okbtn.clicked.connect(ok)
        s.box = QVBoxLayout()
        s.box.addLayout(s.form)
        s.box.addWidget(s.darkEntry, alignment=Qt.AlignCenter)
        s.box.addWidget(s.numberEntry, alignment=Qt.AlignCenter)
        s.box.addWidget(s.okbtn, alignment=Qt.AlignCenter)
        #
        s.setLayout(s.box)
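
# ---------------------------------------------------------------------------
# Minimal launcher sketch (added for illustration; not part of the original
# file). It only uses the dialog class and PyQt5 imports already above.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import sys

    def on_submit(sura, from_aya, to_aya, dark, numbered):
        # The start verse arrives zero-based, as emitted in ok() above.
        print(sura, from_aya, to_aya, dark, numbered)

    app = QApplication(sys.argv)
    dialog = SuraAyatDialog()
    dialog.submit.connect(on_submit)
    dialog.show()
    sys.exit(app.exec_())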
# test importing of required modules and sit2standpy package


def test_numpy():
    import numpy

    return


def test_scipy():
    import scipy

    return


def test_pywt():
    import pywt

    return


def test_pysit2stand():
    import sit2standpy
    from sit2standpy import Sit2Stand, detectors, mov_stats, Transition, TransitionQuantifier, \
        AccelerationFilter, process_timestamps, __version__
    from sit2standpy.detectors import Stillness, Displacement

    return
#!/usr/bin/env python2NEWLINE#NEWLINE# Distributed under the MIT/X11 software license, see the accompanyingNEWLINE# file COPYING or http://www.opensource.org/licenses/mit-license.php.NEWLINE#NEWLINENEWLINEfrom test_framework.mininode import *NEWLINEfrom test_framework.test_framework import BitcoinTestFrameworkNEWLINEfrom test_framework.util import *NEWLINEimport loggingNEWLINENEWLINE'''NEWLINEIn this test we connect to one node over p2p, send it numerous inv's, andNEWLINEcompare the resulting number of getdata requests to a max allowed value. WeNEWLINEtest for exceeding 128 blocks in flight, which was the limit an 0.9 client willNEWLINEreach. [0.10 clients shouldn't request more than 16 from a single peer.]NEWLINE'''NEWLINEMAX_REQUESTS = 128NEWLINENEWLINEclass TestManager(NodeConnCB):NEWLINE # set up NodeConnCB callbacks, overriding base classNEWLINE def on_getdata(self, conn, message):NEWLINE self.log.debug("got getdata %s" % repr(message))NEWLINE # Log the requestsNEWLINE for inv in message.inv:NEWLINE if inv.hash not in self.blockReqCounts:NEWLINE self.blockReqCounts[inv.hash] = 0NEWLINE self.blockReqCounts[inv.hash] += 1NEWLINENEWLINE def on_close(self, conn):NEWLINE if not self.disconnectOkay:NEWLINE raise EarlyDisconnectError(0)NEWLINENEWLINE def __init__(self):NEWLINE NodeConnCB.__init__(self)NEWLINE self.log = logging.getLogger("BlockRelayTest")NEWLINE self.create_callback_map()NEWLINENEWLINE def add_new_connection(self, connection):NEWLINE self.connection = connectionNEWLINE self.blockReqCounts = {}NEWLINE self.disconnectOkay = FalseNEWLINENEWLINE def run(self):NEWLINE try:NEWLINE fail = FalseNEWLINE self.connection.rpc.generate(1) # Leave IBDNEWLINENEWLINE numBlocksToGenerate = [ 8, 16, 128, 1024 ]NEWLINE for count in range(len(numBlocksToGenerate)):NEWLINE current_invs = []NEWLINE for i in range(numBlocksToGenerate[count]):NEWLINE current_invs.append(CInv(2, random.randrange(0, 1<<256)))NEWLINE if len(current_invs) >= 50000:NEWLINE self.connection.send_message(msg_inv(current_invs))NEWLINE current_invs = []NEWLINE if len(current_invs) > 0:NEWLINE self.connection.send_message(msg_inv(current_invs))NEWLINE NEWLINE # Wait and see how many blocks were requestedNEWLINE time.sleep(2)NEWLINENEWLINE total_requests = 0NEWLINE with mininode_lock:NEWLINE for key in self.blockReqCounts:NEWLINE total_requests += self.blockReqCounts[key]NEWLINE if self.blockReqCounts[key] > 1:NEWLINE raise AssertionError("Error, test failed: block %064x requested more than once" % key)NEWLINE if total_requests > MAX_REQUESTS:NEWLINE raise AssertionError("Error, too many blocks (%d) requested" % total_requests)NEWLINE print "Round %d: success (total requests: %d)" % (count, total_requests)NEWLINE except AssertionError as e:NEWLINE print "TEST FAILED: ", e.argsNEWLINENEWLINE self.disconnectOkay = TrueNEWLINE self.connection.disconnect_node()NEWLINENEWLINE NEWLINEclass MaxBlocksInFlightTest(BitcoinTestFramework):NEWLINE def add_options(self, parser):NEWLINE parser.add_option("--testbinary", dest="testbinary",NEWLINE default=os.getenv("LEMONCOIND", "lemoncoind"),NEWLINE help="Binary to test max block requests behavior")NEWLINENEWLINE def setup_chain(self):NEWLINE print "Initializing test directory "+self.options.tmpdirNEWLINE initialize_chain_clean(self.options.tmpdir, 1)NEWLINENEWLINE def setup_network(self):NEWLINE self.nodes = start_nodes(1, self.options.tmpdir, NEWLINE extra_args=[['-debug', '-whitelist=127.0.0.1']],NEWLINE binary=[self.options.testbinary])NEWLINENEWLINE def run_test(self):NEWLINE test = 
TestManager()NEWLINE test.add_new_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test))NEWLINE NetworkThread().start() # Start up network handling in another threadNEWLINE test.run()NEWLINENEWLINEif __name__ == '__main__':NEWLINE MaxBlocksInFlightTest().main()NEWLINE
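NEWLINENEWLINE# A small standalone sketch of the inv-batching logic in TestManager.run() aboveNEWLINE# (assumption: 50000 mirrors the protocol's per-message inv limit).NEWLINEdef batch_invs(invs, batch_size=50000):NEWLINE    """Yield lists of at most batch_size invs, one per msg_inv to send."""NEWLINE    for start in range(0, len(invs), batch_size):NEWLINE        yield invs[start:start + batch_size]NEWLINE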
"""Application-specific settings."""NEWLINEimport osNEWLINEfrom django.conf import settings as _settingsNEWLINEfrom django.core.exceptions import ImproperlyConfiguredNEWLINENEWLINENEWLINE###############################################################################NEWLINE# Single settings.NEWLINE###############################################################################NEWLINEclass Setting(object):NEWLINE """Settings option helper class."""NEWLINE def __init__(self, **kwargs):NEWLINE """Initializer.NEWLINENEWLINE :kwarg default: Override default for getting.NEWLINE :type default: ``object``NEWLINE :kwarg from_env: Allow variable from evironment.NEWLINE :type from_env: ``bool``NEWLINE :kwarg valid_set: Set of valid values for setting.NEWLINE :type valid_set: ``set``NEWLINE """NEWLINE self.from_env = kwargs.get('from_env', False)NEWLINE self.default = kwargs.get('default', None)NEWLINE self.valid_set = kwargs.get('valid_set', None)NEWLINENEWLINE def validate(self, name, value):NEWLINE """Validate and return a value."""NEWLINENEWLINE if self.valid_set and value not in self.valid_set:NEWLINE raise ImproperlyConfigured(NEWLINE "%s: \"%s\" is not a valid setting (choose between %s)." %NEWLINE (name, value, ", ".join("\"%s\"" % x for x in self.valid_set)))NEWLINENEWLINE return valueNEWLINENEWLINE def env_clean(self, value): # pylint: disable=R0201NEWLINE """Clean / convert environment variable to proper type."""NEWLINE return valueNEWLINENEWLINE def get(self, name, default=None):NEWLINE """Get value."""NEWLINE default = default if default is not None else self.defaultNEWLINE try:NEWLINE value = getattr(_settings, name)NEWLINE except AttributeError:NEWLINE value = os.environ.get(name, default) if self.from_env else defaultNEWLINE # Convert env variable.NEWLINE if value != default:NEWLINE value = self.env_clean(value)NEWLINENEWLINE return self.validate(name, value)NEWLINENEWLINENEWLINEclass BoolSetting(Setting):NEWLINE """Boolean setting.."""NEWLINE def env_clean(self, value):NEWLINE """Clean / convert environment variable to proper type."""NEWLINE return self.parse_bool(value)NEWLINENEWLINE @classmethodNEWLINE def parse_bool(cls, value, default=None):NEWLINE """Convert ``string`` or ``bool`` to ``bool``."""NEWLINE if value is None:NEWLINE return defaultNEWLINENEWLINE elif isinstance(value, bool):NEWLINE return valueNEWLINENEWLINE elif isinstance(value, basestring):NEWLINE if value == 'True':NEWLINE return TrueNEWLINE elif value == 'False':NEWLINE return FalseNEWLINENEWLINE raise Exception("Value %s is not boolean." % value)NEWLINENEWLINENEWLINE###############################################################################NEWLINE# Settings wrapper.NEWLINE###############################################################################NEWLINEclass Settings(object):NEWLINE """Cloud Browser application settings.NEWLINENEWLINE This class wraps the "real" Django settings object, so can be used instead.NEWLINE The additional cloud browser settings are as follows:NEWLINENEWLINE .. note::NEWLINE **Environment Variables**: Certain credential settings can come from OSNEWLINE environment variables instead of from a settings file value to open upNEWLINE more options for secrets management. Values that can be set in theNEWLINE environment are designated with an "(*Env*)" notation.NEWLINENEWLINE Setting a value this way could be done, e.g.::NEWLINENEWLINE $ export CLOUD_BROWSER_AWS_ACCOUNT="my_account"NEWLINE $ export CLOUD_BROWSER_AWS_SECRET_KEY="my_secret"NEWLINE $ # ... 
start django application with environment variables.NEWLINENEWLINE    **Datastore Settings**:NEWLINENEWLINE    * ``CLOUD_BROWSER_DATASTORE``: Choice of datastore (see values below).NEWLINENEWLINE    **Amazon Web Services**: Configure AWS S3 as backing datastore.NEWLINENEWLINE    * ``CLOUD_BROWSER_DATASTORE = "AWS"``NEWLINE    * ``CLOUD_BROWSER_AWS_ACCOUNT``: Account name. (*Env*)NEWLINE    * ``CLOUD_BROWSER_AWS_SECRET_KEY``: Account API secret key. (*Env*)NEWLINENEWLINE    **Google Storage for Developers**: Configure Google Storage as backingNEWLINE    datastore.NEWLINENEWLINE    * ``CLOUD_BROWSER_DATASTORE = "Google"``NEWLINE    * ``CLOUD_BROWSER_GS_ACCOUNT``: Account name. (*Env*)NEWLINE    * ``CLOUD_BROWSER_GS_SECRET_KEY``: Account API secret key. (*Env*)NEWLINENEWLINE    **Rackspace**: Configure Rackspace Cloud Files as backing datastore.NEWLINENEWLINE    * ``CLOUD_BROWSER_DATASTORE = "Rackspace"``NEWLINE    * ``CLOUD_BROWSER_RACKSPACE_ACCOUNT``: Account name. (*Env*)NEWLINE    * ``CLOUD_BROWSER_RACKSPACE_SECRET_KEY``: Account API secret key. (*Env*)NEWLINE    * ``CLOUD_BROWSER_RACKSPACE_SERVICENET``: Boolean designating whether orNEWLINE      not to use Rackspace's servicenet (i.e., the private interface on aNEWLINE      Cloud Server). (*Env*)NEWLINE    * ``CLOUD_BROWSER_RACKSPACE_AUTHURL``: Alternative authorization server,NEWLINE      for use, e.g., with `OpenStack <http://www.openstack.org/>`_ instead ofNEWLINE      Rackspace. (*Env*)NEWLINENEWLINE    **Filesystem**: Configure simple filesystem mock datastore.NEWLINENEWLINE    * ``CLOUD_BROWSER_DATASTORE = "Filesystem"``NEWLINE    * ``CLOUD_BROWSER_FILESYSTEM_ROOT``: Filesystem root to serve from.NEWLINENEWLINE    **View Permissions**: A standard Django view decorator object can beNEWLINE    specified, which is wrapped around all browsing / viewing views -- forNEWLINE    example, to limit views to logged-in members, use ``login_required`` andNEWLINE    for staff only, use ``staff_member_required``. Note that either a realNEWLINE    decorator function or a fully-qualified string path is acceptable, so youNEWLINE    can use, e.g., "django.contrib.admin.views.decorators.staff_member_required"NEWLINE    instead, which might help with certain settings.py import-order-relatedNEWLINE    issues.NEWLINENEWLINE    * ``CLOUD_BROWSER_VIEW_DECORATOR``: View decorator or fully-qualifiedNEWLINE      string path.NEWLINENEWLINE    **Container Permissions**: Cloud browser allows a very rudimentary formNEWLINE    of access control at the container level with white and black lists.NEWLINE    If the white list is set, only container names in the white list areNEWLINE    allowed. If the white list is unset, then any container name *not* inNEWLINE    the black list is permitted. All name matching is exact (no regularNEWLINE    expressions, etc.).NEWLINENEWLINE    * ``CLOUD_BROWSER_CONTAINER_WHITELIST``: White list of names. (Iterable)NEWLINE    * ``CLOUD_BROWSER_CONTAINER_BLACKLIST``: Black list of names. 
(Iterable)NEWLINENEWLINE    **General**: Other settings.NEWLINENEWLINE    * ``CLOUD_BROWSER_DEFAULT_LIST_LIMIT``: Default number of objects toNEWLINE      display per browser page.NEWLINE    * ``CLOUD_BROWSER_STATIC_MEDIA_DIR``: If this application's static mediaNEWLINE      (found in ``app_media``) is served up under the ``settings.MEDIA_ROOT``,NEWLINE      then set a relative path from the root, and the static media will be usedNEWLINE      instead of a Django-based static view fallback.NEWLINE    """NEWLINE    #: Valid datastore types.NEWLINE    DATASTORES = set((NEWLINE        'AWS',NEWLINE        'Google',NEWLINE        'Rackspace',NEWLINE        'Filesystem',NEWLINE    ))NEWLINENEWLINE    #: Settings dictionary of accessor callables.NEWLINE    SETTINGS = {NEWLINE        # Datastore choice.NEWLINE        'CLOUD_BROWSER_DATASTORE': Setting(NEWLINE            default='Filesystem',NEWLINE            valid_set=DATASTORESNEWLINE        ),NEWLINENEWLINE        # Amazon Web Services S3 datastore settings.NEWLINE        'CLOUD_BROWSER_AWS_ACCOUNT': Setting(from_env=True),NEWLINE        'CLOUD_BROWSER_AWS_SECRET_KEY': Setting(from_env=True),NEWLINENEWLINE        # Google Storage for Developers datastore settings.NEWLINE        'CLOUD_BROWSER_GS_ACCOUNT': Setting(from_env=True),NEWLINE        'CLOUD_BROWSER_GS_SECRET_KEY': Setting(from_env=True),NEWLINENEWLINE        # Rackspace datastore settings.NEWLINE        'CLOUD_BROWSER_RACKSPACE_ACCOUNT': Setting(from_env=True),NEWLINE        'CLOUD_BROWSER_RACKSPACE_SECRET_KEY': Setting(from_env=True),NEWLINE        'CLOUD_BROWSER_RACKSPACE_SERVICENET': BoolSetting(from_env=True),NEWLINE        # The auth URL is a string, not a boolean, so use the plain Setting.NEWLINE        'CLOUD_BROWSER_RACKSPACE_AUTHURL': Setting(from_env=True),NEWLINENEWLINE        # Filesystem datastore settings.NEWLINE        'CLOUD_BROWSER_FILESYSTEM_ROOT': Setting(),NEWLINENEWLINE        # View permissions.NEWLINE        'CLOUD_BROWSER_VIEW_DECORATOR': Setting(),NEWLINENEWLINE        # Permissions lists for containers.NEWLINE        'CLOUD_BROWSER_CONTAINER_WHITELIST': Setting(),NEWLINE        'CLOUD_BROWSER_CONTAINER_BLACKLIST': Setting(),NEWLINENEWLINE        # Browser settings.NEWLINE        'CLOUD_BROWSER_DEFAULT_LIST_LIMIT': Setting(default=20),NEWLINENEWLINE        # Static media root.NEWLINE        'CLOUD_BROWSER_STATIC_MEDIA_DIR': Setting(),NEWLINE    }NEWLINENEWLINE    def __init__(self):NEWLINE        """Initializer."""NEWLINE        self.__container_whitelist = NoneNEWLINE        self.__container_blacklist = NoneNEWLINENEWLINE    def __getattr__(self, name, default=None):NEWLINE        """Get setting."""NEWLINE        if name in self.SETTINGS:NEWLINE            return self.SETTINGS[name].get(name, default)NEWLINENEWLINE        # Use real Django settings.NEWLINE        return getattr(_settings, name, default)NEWLINENEWLINE    @propertyNEWLINE    def _container_whitelist(self):NEWLINE        """Container whitelist."""NEWLINE        if self.__container_whitelist is None:NEWLINE            self.__container_whitelist = \NEWLINE                set(self.CLOUD_BROWSER_CONTAINER_WHITELIST or [])NEWLINE        return self.__container_whitelistNEWLINENEWLINE    @propertyNEWLINE    def _container_blacklist(self):NEWLINE        """Container blacklist."""NEWLINE        if self.__container_blacklist is None:NEWLINE            self.__container_blacklist = \NEWLINE                set(self.CLOUD_BROWSER_CONTAINER_BLACKLIST or [])NEWLINE        return self.__container_blacklistNEWLINENEWLINE    def container_permitted(self, name):NEWLINE        """Return whether or not a container is permitted.NEWLINENEWLINE        :param name: Container name.NEWLINE        :return: ``True`` if container is permitted.NEWLINE        :rtype: ``bool``NEWLINE        """NEWLINE        white = self._container_whitelistNEWLINE        black = self._container_blacklistNEWLINE        return name not in black and (not white or name in white)NEWLINENEWLINE    @propertyNEWLINE    def app_media_url(self):NEWLINE        """Get application media root from real media root URL."""NEWLINE        url = NoneNEWLINE        
media_dir = self.CLOUD_BROWSER_STATIC_MEDIA_DIRNEWLINE if media_dir:NEWLINE url = os.path.join(self.MEDIA_URL, media_dir).rstrip('/') + '/'NEWLINENEWLINE return urlNEWLINENEWLINE @propertyNEWLINE def app_media_doc_root(self): # pylint: disable=R0201NEWLINE """Get application media document (file) root."""NEWLINE app_dir = os.path.abspath(os.path.dirname(__file__))NEWLINE media_root = os.path.join(app_dir, 'media')NEWLINENEWLINE return media_rootNEWLINENEWLINENEWLINEsettings = Settings() # pylint: disable=C0103NEWLINE
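NEWLINENEWLINE# A minimal usage sketch of the wrapper above (assumption: this module isNEWLINE# importable as cloud_browser.app_settings). Names the wrapper does notNEWLINE# manage fall through to the real Django settings object.NEWLINEfrom cloud_browser.app_settings import settingsNEWLINENEWLINEdatastore = settings.CLOUD_BROWSER_DATASTORE  # 'Filesystem' by defaultNEWLINEif settings.container_permitted('public-assets'):NEWLINE    limit = settings.CLOUD_BROWSER_DEFAULT_LIST_LIMIT  # 20 by defaultNEWLINE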
import platformNEWLINEfrom setuptools import setupNEWLINEfrom setuptools import find_packagesNEWLINEfrom setuptools import ExtensionNEWLINENEWLINENEWLINEextra_compile_args = [NEWLINE '-std=c++11',NEWLINE '-O3',NEWLINE '-Wall',NEWLINE '-Wextra',NEWLINE '-Wconversion',NEWLINE '-fno-strict-aliasing',NEWLINE '-fno-rtti',NEWLINE]NEWLINENEWLINEif platform.system() == 'Darwin':NEWLINE extra_compile_args += ['-mmacosx-version-min=10.7', '-stdlib=libc++']NEWLINENEWLINENEWLINEsetup(NEWLINE name="python-rocksdb",NEWLINE version='0.6.8',NEWLINE description="Python bindings for RocksDB",NEWLINE keywords='rocksdb',NEWLINE author='Ming Hsuan Tu',NEWLINE author_email="qrnnis2623891@gmail.com",NEWLINE url="https://github.com/twmht/python-rocksdb",NEWLINE license='BSD License',NEWLINE setup_requires=['setuptools>=25', 'Cython>=0.20'],NEWLINE install_requires=['setuptools>=25'],NEWLINE package_dir={'rocksdb': 'rocksdb'},NEWLINE packages=find_packages('.'),NEWLINE ext_modules=[Extension(NEWLINE 'rocksdb._rocksdb',NEWLINE ['rocksdb/_rocksdb.pyx'],NEWLINE extra_compile_args=extra_compile_args,NEWLINE language='c++',NEWLINE libraries=['rocksdb', 'snappy', 'bz2', 'zstd', 'lz4'],NEWLINE )],NEWLINE extras_require={NEWLINE "doc": ['sphinx_rtd_theme', 'sphinx'],NEWLINE "test": ['pytest'],NEWLINE },NEWLINE include_package_data=TrueNEWLINE)NEWLINE
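NEWLINENEWLINE# A typical local build sketch for the extension above (assumptions: theNEWLINE# rocksdb, snappy, bz2, zstd and lz4 headers/libraries are already installedNEWLINE# where the compiler can find them):NEWLINE#NEWLINE#   pip install 'Cython>=0.20'NEWLINE#   python setup.py build_ext --inplaceNEWLINE#   python setup.py installNEWLINE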
#!/usr/bin/env python3NEWLINE# -*- coding: utf-8 -*-NEWLINEimport sysNEWLINENEWLINEif __name__ == '__main__':NEWLINE    X1 = int(input('Enter x1 '))NEWLINE    Y1 = int(input('Enter y1 '))NEWLINE    X2 = int(input('Enter x2 '))NEWLINE    Y2 = int(input('Enter y2 '))NEWLINENEWLINE    if X1 == -X2 and Y1 == -Y2:NEWLINE        print('The points are symmetric about the origin')NEWLINE    elif X1 == -X2 and Y1 == Y2:NEWLINE        print('The points are symmetric about the Y axis')NEWLINE    elif X1 == X2 and Y1 == -Y2:NEWLINE        print('The points are symmetric about the X axis')NEWLINE    else:NEWLINE        print('The points are not symmetric', file=sys.stderr)NEWLINE        exit(1)NEWLINE
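NEWLINENEWLINE# A minimal refactor sketch of the branching above (symmetry_kind is aNEWLINE# hypothetical helper name): returning a string instead of printing makesNEWLINE# the classification unit-testable.NEWLINEdef symmetry_kind(x1, y1, x2, y2):NEWLINE    """Classify how (x2, y2) mirrors (x1, y1); None if not symmetric."""NEWLINE    if x1 == -x2 and y1 == -y2:NEWLINE        return 'origin'  # symmetric about the originNEWLINE    if x1 == -x2 and y1 == y2:NEWLINE        return 'y-axis'  # symmetric about the Y axisNEWLINE    if x1 == x2 and y1 == -y2:NEWLINE        return 'x-axis'  # symmetric about the X axisNEWLINE    return NoneNEWLINENEWLINENEWLINEassert symmetry_kind(1, 2, -1, -2) == 'origin'NEWLINEassert symmetry_kind(1, 2, -1, 2) == 'y-axis'NEWLINEassert symmetry_kind(1, 2, 1, -2) == 'x-axis'NEWLINEassert symmetry_kind(1, 2, 3, 4) is NoneNEWLINE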
# -*- coding: utf-8 -*-NEWLINEimport osNEWLINEimport sysNEWLINENEWLINEcmd = 'coverage run `which djangocms-helper` aldryn_boilerplates test --cms --extra-settings=test_settings'NEWLINENEWLINEsys.exit(os.system(cmd))NEWLINE
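NEWLINENEWLINE# A minimal alternative sketch (assumption: same test command as above):NEWLINE# subprocess.call returns the child's exit code directly, whereas os.system'sNEWLINE# return value encodes the status in a platform-dependent way.NEWLINEimport subprocessNEWLINENEWLINEsys.exit(subprocess.call(cmd, shell=True))NEWLINE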
# MIT LICENSENEWLINE#NEWLINE# Copyright 1997 - 2020 by IXIA KeysightNEWLINE#NEWLINE# Permission is hereby granted, free of charge, to any person obtaining a copyNEWLINE# of this software and associated documentation files (the "Software"),NEWLINE# to deal in the Software without restriction, including without limitationNEWLINE# the rights to use, copy, modify, merge, publish, distribute, sublicense,NEWLINE# and/or sell copies of the Software, and to permit persons to whom theNEWLINE# Software is furnished to do so, subject to the following conditions:NEWLINE#NEWLINE# The above copyright notice and this permission notice shall be included inNEWLINE# all copies or substantial portions of the Software.NEWLINE#NEWLINE# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS ORNEWLINE# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,NEWLINE# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THENEWLINE# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHERNEWLINE# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,NEWLINE# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS INNEWLINE# THE SOFTWARE. NEWLINEfrom ixnetwork_restpy.base import BaseNEWLINEfrom ixnetwork_restpy.files import FilesNEWLINENEWLINENEWLINEclass SubTlv(Base):NEWLINE """Sub Tlv containerNEWLINE The SubTlv class encapsulates a list of subTlv resources that are managed by the system.NEWLINE A list of resources can be retrieved from the server using the SubTlv.find() method.NEWLINE """NEWLINENEWLINE __slots__ = ()NEWLINE _SDM_NAME = 'subTlv'NEWLINE _SDM_ATT_MAP = {NEWLINE 'Description': 'description',NEWLINE 'EnablePerSession': 'enablePerSession',NEWLINE 'IsEnabled': 'isEnabled',NEWLINE 'Name': 'name',NEWLINE }NEWLINENEWLINE def __init__(self, parent):NEWLINE super(SubTlv, self).__init__(parent)NEWLINENEWLINE @propertyNEWLINE def Value(self):NEWLINE """NEWLINE ReturnsNEWLINE -------NEWLINE - obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tlvprofile.value_ac1d7b13584a86b9cf1c28dca3390bca.Value): An instance of the Value classNEWLINENEWLINE RaisesNEWLINE ------NEWLINE - ServerError: The server has encountered an uncategorized error conditionNEWLINE """NEWLINE from ixnetwork_restpy.testplatform.sessions.ixnetwork.topology.tlvprofile.value_ac1d7b13584a86b9cf1c28dca3390bca import ValueNEWLINE return Value(self)._select()NEWLINENEWLINE @propertyNEWLINE def Description(self):NEWLINE """NEWLINE ReturnsNEWLINE -------NEWLINE - str: Description of the tlvNEWLINE """NEWLINE return self._get_attribute(self._SDM_ATT_MAP['Description'])NEWLINE @Description.setterNEWLINE def Description(self, value):NEWLINE self._set_attribute(self._SDM_ATT_MAP['Description'], value)NEWLINENEWLINE @propertyNEWLINE def EnablePerSession(self):NEWLINE """NEWLINE ReturnsNEWLINE -------NEWLINE - obj(ixnetwork_restpy.multivalue.Multivalue): Enable TLV per sessionNEWLINE """NEWLINE from ixnetwork_restpy.multivalue import MultivalueNEWLINE return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['EnablePerSession']))NEWLINENEWLINE @propertyNEWLINE def IsEnabled(self):NEWLINE """NEWLINE ReturnsNEWLINE -------NEWLINE - bool: Enables/disables this tlvNEWLINE """NEWLINE return self._get_attribute(self._SDM_ATT_MAP['IsEnabled'])NEWLINE @IsEnabled.setterNEWLINE def IsEnabled(self, value):NEWLINE self._set_attribute(self._SDM_ATT_MAP['IsEnabled'], value)NEWLINENEWLINE @propertyNEWLINE def Name(self):NEWLINE """NEWLINE ReturnsNEWLINE 
-------NEWLINE - str: Name of the tlvNEWLINE """NEWLINE return self._get_attribute(self._SDM_ATT_MAP['Name'])NEWLINE @Name.setterNEWLINE def Name(self, value):NEWLINE self._set_attribute(self._SDM_ATT_MAP['Name'], value)NEWLINENEWLINE def update(self, Description=None, IsEnabled=None, Name=None):NEWLINE """Updates subTlv resource on the server.NEWLINENEWLINE This method has some named parameters with a type: obj (Multivalue).NEWLINE The Multivalue class has documentation that details the possible values for those named parameters.NEWLINENEWLINE ArgsNEWLINE ----NEWLINE - Description (str): Description of the tlvNEWLINE - IsEnabled (bool): Enables/disables this tlvNEWLINE - Name (str): Name of the tlvNEWLINENEWLINE RaisesNEWLINE ------NEWLINE - ServerError: The server has encountered an uncategorized error conditionNEWLINE """NEWLINE return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))NEWLINENEWLINE def find(self, Description=None, IsEnabled=None, Name=None):NEWLINE """Finds and retrieves subTlv resources from the server.NEWLINENEWLINE All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve subTlv resources from the server.NEWLINE To retrieve an exact match ensure the parameter value starts with ^ and ends with $NEWLINE By default the find method takes no parameters and will retrieve all subTlv resources from the server.NEWLINENEWLINE ArgsNEWLINE ----NEWLINE - Description (str): Description of the tlvNEWLINE - IsEnabled (bool): Enables/disables this tlvNEWLINE - Name (str): Name of the tlvNEWLINENEWLINE ReturnsNEWLINE -------NEWLINE - self: This instance with matching subTlv resources retrieved from the server available through an iterator or indexNEWLINENEWLINE RaisesNEWLINE ------NEWLINE - ServerError: The server has encountered an uncategorized error conditionNEWLINE """NEWLINE return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))NEWLINENEWLINE def read(self, href):NEWLINE """Retrieves a single instance of subTlv data from the server.NEWLINENEWLINE ArgsNEWLINE ----NEWLINE - href (str): An href to the instance to be retrievedNEWLINENEWLINE ReturnsNEWLINE -------NEWLINE - self: This instance with the subTlv resources from the server available through an iterator or indexNEWLINENEWLINE RaisesNEWLINE ------NEWLINE - NotFoundError: The requested resource does not exist on the serverNEWLINE - ServerError: The server has encountered an uncategorized error conditionNEWLINE """NEWLINE return self._read(href)NEWLINENEWLINE def get_device_ids(self, PortNames=None, EnablePerSession=None):NEWLINE """Base class infrastructure that gets a list of subTlv device ids encapsulated by this object.NEWLINENEWLINE Use the optional regex parameters in the method to refine the list of device ids encapsulated by this object.NEWLINENEWLINE ArgsNEWLINE ----NEWLINE - PortNames (str): optional regex of port namesNEWLINE - EnablePerSession (str): optional regex of enablePerSessionNEWLINENEWLINE ReturnsNEWLINE -------NEWLINE - list(int): A list of device ids that meets the regex criteria provided in the method parametersNEWLINENEWLINE RaisesNEWLINE ------NEWLINE - ServerError: The server has encountered an uncategorized error conditionNEWLINE """NEWLINE return self._get_ngpf_device_ids(locals())NEWLINE
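NEWLINENEWLINE# A minimal usage sketch (assumptions: 'parent' is the Tlv-like containerNEWLINE# that exposes this list, per this generated API's conventions). find()NEWLINE# evaluates its named parameters as regex on the server, so anchoring withNEWLINE# ^ and $ yields an exact-name match.NEWLINEsub_tlvs = parent.SubTlv.find(Name='^my-sub-tlv$')NEWLINEfor sub_tlv in sub_tlvs:NEWLINE    if not sub_tlv.IsEnabled:NEWLINE        sub_tlv.update(IsEnabled=True, Description='enabled by script')NEWLINE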
def to_binary(int_digit, length=4):NEWLINE    """Convert a digit into a binary string.NEWLINENEWLINE    Arguments:NEWLINE        int_digit {str} -- the digit to be convertedNEWLINENEWLINE    Keyword Arguments:NEWLINE        length {int} -- length of the converted string (default: {4})NEWLINENEWLINE    Returns:NEWLINE        str -- a zero-padded string of the given length converted from the digitNEWLINENEWLINE    """NEWLINE    format_str = '{:0>%ds}' % lengthNEWLINE    return format_str.format(bin(int(int_digit))[2:])NEWLINENEWLINENEWLINEdef checkio(data):NEWLINE    data = ['{:0>2s}'.format(i) for i in data.split(':')]NEWLINE    bin_data = [[to_binary(i[0], 3), to_binary(i[1])] for i in data]NEWLINE    bin_data = list(map(lambda x: ' '.join(x), bin_data))NEWLINE    bin_data = ' : '.join(bin_data)NEWLINE    return bin_data.replace('0', '.').replace('1', '-')[1:]NEWLINENEWLINENEWLINE# These "asserts" are used only for self-checkingNEWLINE# and are not necessary for auto-testingNEWLINEif __name__ == '__main__':NEWLINE    assert checkio("10:37:49") == ".- .... : .-- .--- : -.. -..-", "First Test"NEWLINE    assert checkio("21:34:56") == "-. ...- : .-- .-.. : -.- .--.", "Second Test"NEWLINE    assert checkio("11:10:12") == ".- ...- : ..- .... : ..- ..-.", "Third Test"NEWLINE    assert checkio("23:59:59") == "-. ..-- : -.- -..- : -.- -..-", "Fourth Test"NEWLINE
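NEWLINENEWLINE# A short worked example of the pieces above, using the first self-check:NEWLINE# each time component is zero-padded to two digits, then rendered as a 3-bitNEWLINE# column (first digit) and a 4-bit column (second digit); checkio() maps 0/1NEWLINE# to ./- and drops the constant leading '.' of the hours column via [1:].NEWLINEassert to_binary('1', 3) == '001'NEWLINEassert to_binary('0') == '0000'NEWLINEassert checkio("10:37:49").startswith(".- ....")NEWLINE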
# import the generic views you want, and the modelsNEWLINE# they apply to.NEWLINEfrom django.views.generic import ListViewNEWLINENEWLINE# Import the models you want to use.NEWLINEfrom snippets.models import SnippetNEWLINENEWLINE# Create a class for your model that subclassesNEWLINE# the generic view you want. This serves as anNEWLINE# index view.NEWLINEclass SnippetListView(ListView):NEWLINE    # Finally, tell the generic view what modelNEWLINE    # it applies to, and which template to use.NEWLINE    model = SnippetNEWLINE    template_name = 'snippets/index.html'NEWLINENEWLINE# ==============================================NEWLINENEWLINE# In your urls.py, you'll need to add theNEWLINE# corresponding route. It'll look like thisNEWLINE# (a fuller urls.py sketch follows below).NEWLINEurl(r'^index/$', views.SnippetListView.as_view())NEWLINE
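NEWLINENEWLINE# A fuller urls.py sketch for the route above (assumption: Django 1.x-eraNEWLINE# url() routing, matching the r'^index/$' pattern style used here).NEWLINEfrom django.conf.urls import urlNEWLINENEWLINEfrom snippets.views import SnippetListViewNEWLINENEWLINEurlpatterns = [NEWLINE    url(r'^index/$', SnippetListView.as_view(), name='snippet-index'),NEWLINE]NEWLINE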
words =[ "aback","abaft","abandoned","abashed","aberrant","abhorrent","abiding","abject","ablaze","able","abnormal","aboard","aboriginal","abortive","abounding","abrasive","abrupt","absent","absorbed","absorbing","abstracted","absurd","abundant","abusive","accept","acceptable","accessible","accidental","account","accurate","achiever","acid","acidic","acoustic","acoustics","acrid","act","action","activity","actor","actually","ad hoc","adamant","adaptable","add","addicted","addition","adhesive","adjoining","adjustment","admire","admit","adorable","adventurous","advertisement","advice","advise","afford","afraid","aftermath","afternoon","afterthought","aggressive","agonizing","agree","agreeable","agreement","ahead","air","airplane","airport","ajar","alarm","alcoholic","alert","alike","alive","alleged","allow","alluring","aloof","amazing","ambiguous","ambitious","amount","amuck","amuse","amused","amusement","amusing","analyze","ancient","anger","angle","angry","animal","animated","announce","annoy","annoyed","annoying","answer","ants","anxious","apathetic","apologise","apparatus","apparel","appear","applaud","appliance","appreciate","approval","approve","aquatic","arch","argue","argument","arithmetic","arm","army","aromatic","arrange","arrest","arrive","arrogant","art","ashamed","ask","aspiring","assorted","astonishing","attach","attack","attempt","attend","attract","attraction","attractive","aunt","auspicious","authority","automatic","available","average","avoid","awake","aware","awesome","awful","axiomatic","babies","baby","back","bad","badge","bag","bait","bake","balance","ball","ban","bang","barbarous","bare","base","baseball","bashful","basin","basket","basketball","bat","bath","bathe","battle","bawdy","bead","beam","bear","beautiful","bed","bedroom","beds","bee","beef","befitting","beg","beginner","behave","behavior","belief","believe","bell","belligerent","bells","belong","beneficial","bent","berry","berserk","best","better","bewildered","big","bike","bikes","billowy","bird","birds","birth","birthday","bit","bite","bite-sized","bitter","bizarre","black","black-and-white","blade","bleach","bless","blind","blink","blood","bloody","blot","blow","blue","blue-eyed","blush","blushing","board","boast","boat","boil","boiling","bolt","bomb","bone","book","books","boorish","boot","border","bore","bored","boring","borrow","bottle","bounce","bouncy","boundary","boundless","bow","box","boy","brainy","brake","branch","brash","brass","brave","brawny","breakable","breath","breathe","breezy","brick","bridge","brief","bright","broad","broken","brother","brown","bruise","brush","bubble","bucket","building","bulb","bump","bumpy","burly","burn","burst","bury","bushes","business","bustling","busy","butter","button","buzz","cabbage","cable","cactus","cagey","cake","cakes","calculate","calculating","calculator","calendar","call","callous","calm","camera","camp","can","cannon","canvas","cap","capable","capricious","caption","car","card","care","careful","careless","caring","carpenter","carriage","carry","cars","cart","carve","cast","cat","cats","cattle","cause","cautious","cave","ceaseless","celery","cellar","cemetery","cent","certain","chalk","challenge","chance","change","changeable","channel","charge","charming","chase","cheap","cheat","check","cheer","cheerful","cheese","chemical","cherries","cherry","chess","chew","chicken","chickens","chief","childlike","children","chilly","chin","chivalrous","choke","chop","chubby","chunky","church","circle","claim","clam","clammy","clap","class","classy","clean","clear","
clever","clip","cloistered","close","closed","cloth","cloudy","clover","club","clumsy","cluttered","coach","coal","coast","coat","cobweb","coherent","coil","cold","collar","collect","color","colorful","colossal","colour","comb","combative","comfortable","command","committee","common","communicate","company","compare","comparison","compete","competition","complain","complete","complex","concentrate","concern","concerned","condemned","condition","confess","confuse","confused","connect","connection","conscious","consider","consist","contain","continue","control","cooing","cook","cool","cooperative","coordinated","copper","copy","corn","correct","cough","count","country","courageous","cover","cow","cowardly","cows","crabby","crack","cracker","crash","crate","craven","crawl","crayon","crazy","cream","creator","creature","credit","creepy","crib","crime","crook","crooked","cross","crow","crowd","crowded","crown","cruel","crush","cry","cub","cuddly","cultured","cumbersome","cup","cure","curious","curl","curly","current","curtain","curve","curved","curvy","cushion","cut","cute","cycle","cynical","dad","daffy","daily","dam","damage","damaged","damaging","damp","dance","dangerous","dapper","dare","dark","dashing","daughter","day","dazzling","dead","deadpan","deafening","dear","death","debonair","debt","decay","deceive","decide","decision","decisive","decorate","decorous","deep","deeply","deer","defeated","defective","defiant","degree","delay","delicate","delicious","delight","delightful","delirious","deliver","demonic","depend","dependent","depressed","deranged","describe","descriptive","desert","deserted","deserve","design","desire","desk","destroy","destruction","detail","detailed","detect","determined","develop","development","devilish","didactic","different","difficult","digestion","diligent","dime","dinner","dinosaurs","direction","direful","dirt","dirty","disagree","disagreeable","disappear","disapprove","disarm","disastrous","discover","discovery","discreet","discussion","disgusted","disgusting","disillusioned","dislike","dispensable","distance","distinct","distribution","disturbed","divergent","divide","division","dizzy","dock","doctor","dog","dogs","doll","dolls","domineering","donkey","door","double","doubt","doubtful","downtown","drab","draconian","drag","drain","dramatic","drawer","dream","dreary","dress","drink","drip","driving","drop","drown","drum","drunk","dry","duck","ducks","dull","dust","dusty","dynamic","dysfunctional","eager","ear","early","earn","earsplitting","earth","earthquake","earthy","easy","eatable","economic","edge","educate","educated","education","effect","efficacious","efficient","egg","eggnog","eggs","eight","elastic","elated","elbow","elderly","electric","elegant","elfin","elite","embarrass","embarrassed","eminent","employ","empty","enchanted","enchanting","encourage","encouraging","end","endurable","energetic","engine","enjoy","enormous","enter","entertain","entertaining","enthusiastic","envious","equable","equal","erect","erratic","error","escape","ethereal","evanescent","evasive","even","event","examine","example","excellent","exchange","excite","excited","exciting","exclusive","excuse","exercise","exist","existence","exotic","expand","expansion","expect","expensive","experience","expert","explain","explode","extend","extra-large","extra-small","exuberant","exultant","eye","eyes","fabulous","face","fact","fade","faded","fail","faint","fair","fairies","faithful","fall","fallacious","false","familiar","famous","fanatical","fancy","fang","fantastic","far","far-flung"
,"farm","fascinated","fast","fasten","fat","faulty","fax","fear","fearful","fearless","feeble","feeling","feigned","female","fence","fertile","festive","fetch","few","field","fierce","file","fill","film","filthy","fine","finger","finicky","fire","fireman","first","fish","fit","five","fix","fixed","flag","flagrant","flaky","flame","flap","flash","flashy","flat","flavor","flawless","flesh","flight","flimsy","flippant","float","flock","flood","floor","flow","flower","flowers","flowery","fluffy","fluttering","fly","foamy","fog","fold","follow","food","fool","foolish","foot","force","foregoing","forgetful","fork","form","fortunate","found","four","fowl","fragile","frail","frame","frantic","free","freezing","frequent","fresh","fretful","friction","friend","friendly","friends","frighten","frightened","frightening","frog","frogs","front","fruit","fry","fuel","full","fumbling","functional","funny","furniture","furry","furtive","future","futuristic","fuzzy","gabby","gainful","gamy","gaping","garrulous","gate","gather","gaudy","gaze","geese","general","gentle","ghost","giant","giants","giddy","gifted","gigantic","giraffe","girl","girls","glamorous","glass","gleaming","glib","glistening","glorious","glossy","glove","glow","glue","godly","gold","good","goofy","gorgeous","government","governor","grab","graceful","grade","grain","grandfather","grandiose","grandmother","grape","grass","grate","grateful","gratis","gray","grease","greasy","great","greedy","green","greet","grey","grieving","grin","grip","groan","groovy","grotesque","grouchy","ground","group","growth","grubby","gruesome","grumpy","guarantee","guard","guarded","guess","guide","guiltless","guitar","gullible","gun","gusty","guttural","habitual","hair","haircut","half","hall","hallowed","halting","hammer","hand","handle","hands","handsome","handsomely","handy","hang","hanging","hapless","happen","happy","harass","harbor","hard","hard-to-find","harm","harmonious","harmony","harsh","hat","hate","hateful","haunt","head","heady","heal","health","healthy","heap","heartbreaking","heat","heavenly","heavy","hellish","help","helpful","helpless","hesitant","hideous","high","high-pitched","highfalutin","hilarious","hill","hissing","historical","history","hobbies","hole","holiday","holistic","hollow","home","homeless","homely","honey","honorable","hook","hop","hope","horn","horrible","horse","horses","hose","hospitable","hospital","hot","hour","house","houses","hover","hug","huge","hulking","hum","humdrum","humor","humorous","hungry","hunt","hurried","hurry","hurt","hushed","husky","hydrant","hypnotic","hysterical","ice","icicle","icky","icy","idea","identify","idiotic","ignorant","ignore","ill","ill-fated","ill-informed","illegal","illustrious","imaginary","imagine","immense","imminent","impartial","imperfect","impolite","important","imported","impossible","impress","improve","impulse","incandescent","include","income","incompetent","inconclusive","increase","incredible","industrious","industry","inexpensive","infamous","influence","inform","inject","injure","ink","innate","innocent","inquisitive","insect","insidious","instinctive","instruct","instrument","insurance","intelligent","intend","interest","interesting","interfere","internal","interrupt","introduce","invent","invention","invincible","invite","irate","iron","irritate","irritating","island","itch","itchy","jaded","jagged","jail","jam","jar","jazzy","jealous","jeans","jelly","jellyfish","jewel","jittery","jobless","jog","join","joke","jolly","joyous","judge","judicious","juggle","juice","juicy","jumb
led","jump","jumpy","juvenile","kaput","keen","kettle","key","kick","kill","kind","kindhearted","kindly","kiss","kittens","kitty","knee","kneel","knife","knit","knock","knot","knotty","knowing","knowledge","knowledgeable","known","label","labored","laborer","lace","lackadaisical","lacking","ladybug","lake","lame","lamentable","lamp","land","language","languid","large","last","late","laugh","laughable","launch","lavish","lazy","lean","learn","learned","leather","left","leg","legal","legs","lethal","letter","letters","lettuce","level","lewd","library","license","lick","lie","light","lighten","like","likeable","limit","limping","line","linen","lip","liquid","list","listen","literate","little","live","lively","living","load","loaf","lock","locket","lonely","long","long-term","longing","look","loose","lopsided","loss","loud","loutish","love","lovely","loving","low","lowly","lucky","ludicrous","lumber","lumpy","lunch","lunchroom","lush","luxuriant","lying","lyrical","macabre","machine","macho","maddening","madly","magenta","magic","magical","magnificent","maid","mailbox","majestic","makeshift","male","malicious","mammoth","man","manage","maniacal","many","marble","march","mark","marked","market","married","marry","marvelous","mask","mass","massive","match","mate","material","materialistic","matter","mature","meal","mean","measly","measure","meat","meaty","meddle","medical","meek","meeting","mellow","melodic","melt","melted","memorize","memory","men","mend","merciful","mere","mess up","messy","metal","mice","middle","mighty","military","milk","milky","mind","mindless","mine","miniature","minister","minor","mint","minute","miscreant","miss","mist","misty","mitten","mix","mixed","moan","moaning","modern","moldy","mom","momentous","money","monkey","month","moon","moor","morning","mother","motion","motionless","mountain","mountainous","mourn","mouth","move","muddle","muddled","mug","multiply","mundane","murder","murky","muscle","mushy","mute","mysterious","nail","naive","name","nappy","narrow","nasty","nation","natural","naughty","nauseating","near","neat","nebulous","necessary","neck","need","needle","needless","needy","neighborly","nerve","nervous","nest","new","next","nice","nifty","night","nimble","nine","nippy","nod","noise","noiseless","noisy","nonchalant","nondescript","nonstop","normal","north","nose","nostalgic","nosy","note","notebook","notice","noxious","null","number","numberless","numerous","nut","nutritious","nutty","oafish","oatmeal","obedient","obeisant","obese","obey","object","obnoxious","obscene","obsequious","observant","observation","observe","obsolete","obtain","obtainable","occur","ocean","oceanic","odd","offbeat","offend","offer","office","oil","old","old-fashioned","omniscient","one","onerous","open","opposite","optimal","orange","oranges","order","ordinary","organic","ossified","outgoing","outrageous","outstanding","oval","oven","overconfident","overflow","overjoyed","overrated","overt","overwrought","owe","own","pack","paddle","page","pail","painful","painstaking","paint","pale","paltry","pan","pancake","panicky","panoramic","paper","parallel","parcel","parched","park","parsimonious","part","partner","party","pass","passenger","past","paste","pastoral","pat","pathetic","pause","payment","peace","peaceful","pear","peck","pedal","peel","peep","pen","pencil","penitent","perfect","perform","periodic","permissible","permit","perpetual","person","pest","pet","petite","pets","phobic","phone","physical","picayune","pick","pickle","picture","pie","pies","pig","pigs","pin","pinch","pi
ne","pink","pipe","piquant","pizzas","place","placid","plain","plan","plane","planes","plant","plantation","plants","plastic","plate","plausible","play","playground","pleasant","please","pleasure","plot","plough","plucky","plug","pocket","point","pointless","poised","poison","poke","polish","polite","political","pollution","poor","pop","popcorn","porter","position","possess","possessive","possible","post","pot","potato","pour","powder","power","powerful","practice","pray","preach","precede","precious","prefer","premium","prepare","present","preserve","press","pretend","pretty","prevent","previous","price","pricey","prick","prickly","print","private","probable","produce","productive","profit","profuse","program","promise","property","prose","protect","protective","protest","proud","provide","psychedelic","psychotic","public","puffy","pull","pump","pumped","punch","puncture","punish","punishment","puny","purple","purpose","purring","push","pushy","puzzled","puzzling","quack","quaint","quarrelsome","quarter","quartz","queen","question","questionable","queue","quick","quickest","quicksand","quiet","quill","quilt","quince","quirky","quiver","quixotic","quizzical","rabbit","rabbits","rabid","race","racial","radiate","ragged","rail","railway","rain","rainstorm","rainy","raise","rake","rambunctious","rampant","range","rapid","rare","raspy","rat","rate","ratty","ray","reach","reaction","reading","ready","real","realize","reason","rebel","receipt","receive","receptive","recess","recognise","recondite","record","red","reduce","redundant","reflect","reflective","refuse","regret","regular","reign","reject","rejoice","relation","relax","release","relieved","religion","rely","remain","remarkable","remember","remind","reminiscent","remove","repair","repeat","replace","reply","report","representative","reproduce","repulsive","request","rescue","resolute","resonant","respect","responsible","rest","retire","return","reward","rhetorical","rhyme","rhythm","rice","rich","riddle","rifle","right","righteous","rightful","rigid","ring","rings","rinse","ripe","risk","ritzy","river","road","roasted","rob","robin","robust","rock","rod","roll","romantic","roof","room","roomy","root","rose","rot","rotten","rough","round","route","royal","rub","ruddy","rude","ruin","rule","run","rural","rush","rustic","ruthless","sable","sack","sad","safe","sail","salt","salty","same","sand","sassy","satisfy","satisfying","save","savory","saw","scale","scandalous","scarce","scare","scarecrow","scared","scarf","scary","scatter","scattered","scene","scent","school","science","scientific","scintillating","scissors","scold","scorch","scrape","scratch","scrawny","scream","screeching","screw","scribble","scrub","sea","seal","search","seashore","seat","second","second-hand","secret","secretary","secretive","sedate","seed","seemly","selection","selective","self","selfish","sense","separate","serious","servant","serve","settle","shade","shaggy","shake","shaky","shallow","shame","shape","share","sharp","shave","sheep","sheet","shelf","shelter","shiny","ship","shirt","shiver","shivering","shock","shocking","shoe","shoes","shop","short","show","shrill","shrug","shut","shy","sick","side","sidewalk","sigh","sign","signal","silent","silk","silky","silly","silver","simple","simplistic","sin","sincere","sink","sip","sister","sisters","six","size","skate","ski","skillful","skin","skinny","skip","skirt","sky","slap","slave","sleep","sleepy","sleet","slim","slimy","slip","slippery","slope","sloppy","slow","small","smart","smash","smell","smelly","smile","smi
ling","smoggy","smoke","smooth","snail","snails","snake","snakes","snatch","sneaky","sneeze","sniff","snobbish","snore","snotty","snow","soak","soap","society","sock","soda","sofa","soft","soggy","solid","somber","son","song","songs","soothe","sophisticated","sordid","sore","sort","sound","soup","sour","space","spade","spare","spark","sparkle","sparkling","special","spectacular","spell","spicy","spiders","spiffy","spiky","spill","spiritual","spiteful","splendid","spoil","sponge","spooky","spoon","spot","spotless","spotted","spotty","spray","spring","sprout","spurious","spy","squalid","square","squash","squeak","squeal","squealing","squeamish","squeeze","squirrel","stage","stain","staking","stale","stamp","standing","star","stare","start","statement","station","statuesque","stay","steadfast","steady","steam","steel","steep","steer","stem","step","stereotyped","stew","stick","sticks","sticky","stiff","stimulating","stingy","stir","stitch","stocking","stomach","stone","stop","store","stormy","story","stove","straight","strange","stranger","strap","straw","stream","street","strengthen","stretch","string","strip","striped","stroke","strong","structure","stuff","stupendous","stupid","sturdy","subdued","subsequent","substance","substantial","subtract","succeed","successful","succinct","suck","sudden","suffer","sugar","suggest","suggestion","suit","sulky","summer","sun","super","superb","superficial","supply","support","suppose","supreme","surprise","surround","suspect","suspend","swanky","sweater","sweet","sweltering","swift","swim","swing","switch","symptomatic","synonymous","system","table","taboo","tacit","tacky","tail","talented","talk","tall","tame","tan","tangible","tangy","tank","tap","tart","taste","tasteful","tasteless","tasty","tawdry","tax","teaching","team","tearful","tease","tedious","teeny","teeny-tiny","teeth","telephone","telling","temper","temporary","tempt","ten","tendency","tender","tense","tent","tenuous","terrible","terrific","terrify","territory","test","tested","testy","texture","thank","thankful","thaw","theory","therapeutic","thick","thin","thing","things","thinkable","third","thirsty","thought","thoughtful","thoughtless","thread","threatening","three","thrill","throat","throne","thumb","thunder","thundering","tick","ticket","tickle","tidy","tie","tiger","tight","tightfisted","time","tin","tiny","tip","tire","tired","tiresome","title","toad","toe","toes","tomatoes","tongue","tooth","toothbrush","toothpaste","toothsome","top","torpid","touch","tough","tour","tow","towering","town","toy","toys","trace","trade","trail","train","trains","tramp","tranquil","transport","trap","trashy","travel","tray","treat","treatment","tree","trees","tremble","tremendous","trick","tricky","trip","trite","trot","trouble","troubled","trousers","truck","trucks","truculent","true","trust","truthful","try","tub","tug","tumble","turkey","turn","twig","twist","two","type","typical","ubiquitous","ugliest","ugly","ultra","umbrella","unable","unaccountable","unadvised","unarmed","unbecoming","unbiased","uncle","uncovered","understood","underwear","undesirable","undress","unequal","unequaled","uneven","unfasten","unhealthy","uninterested","unique","unit","unite","unkempt","unknown","unlock","unnatural","unpack","unruly","unsightly","unsuitable","untidy","unused","unusual","unwieldy","unwritten","upbeat","uppity","upset","uptight","use","used","useful","useless","utopian","utter","uttermost","vacation","vacuous","vagabond","vague","valuable","value","van","vanish","various","vase","vast","vegetable","veil
","vein","vengeful","venomous","verdant","verse","versed","vessel","vest","victorious","view","vigorous","violent","violet","visit","visitor","vivacious","voice","voiceless","volatile","volcano","volleyball","voracious","voyage","vulgar","wacky","waggish","wail","wait","waiting","wakeful","walk","wall","wander","wandering","want","wanting","war","warlike","warm","warn","wary","wash","waste","wasteful","watch","water","watery","wave","waves","wax","way","weak","wealth","wealthy","weary","weather","week","weigh","weight","welcome","well-groomed","well-made","well-off","well-to-do","wet","wheel","whimsical","whine","whip","whirl","whisper","whispering","whistle","white","whole","wholesale","wicked","wide","wide-eyed","wiggly","wild","wilderness","willing","wind","window","windy","wine","wing","wink","winter","wipe","wire","wiry","wise","wish","wistful","witty","wobble","woebegone","woman","womanly","women","wonder","wonderful","wood","wooden","wool","woozy","word","work","workable","worm","worried","worry","worthless","wound","wrap","wrathful","wreck","wren","wrench","wrestle","wretched","wriggle","wrist","writer","writing","wrong","wry","x-ray","yak","yam","yard","yarn","yawn","year","yell","yellow","yielding","yoke","young","youthful","yummy","zany","zealous","zebra","zephyr","zesty","zinc","zip","zipper","zippy","zonked","zoo","zoom"]NEWLINE
import osNEWLINEimport timeNEWLINEimport statNEWLINEimport jsonNEWLINEimport zlibNEWLINEimport typingNEWLINEfrom typing import List, Sequence, MutableSequence, OptionalNEWLINEfrom collections import UserDictNEWLINEfrom hashlib import sha256NEWLINEfrom operator import attrgetterNEWLINEfrom torba.client.hash import better_aes_encrypt, better_aes_decryptNEWLINENEWLINEif typing.TYPE_CHECKING:NEWLINE    from torba.client import basemanager, baseaccount, baseledgerNEWLINENEWLINENEWLINEclass TimestampedPreferences(UserDict):NEWLINENEWLINE    def __init__(self, d: dict = None):NEWLINE        # Assign self.data directly; letting UserDict.__init__ call update()NEWLINE        # would re-wrap already-timestamped entries via __setitem__ below.NEWLINE        super().__init__()NEWLINE        self.data = d or {}NEWLINENEWLINE    def __getitem__(self, key):NEWLINE        return self.data[key]['value']NEWLINENEWLINE    def __setitem__(self, key, value):NEWLINE        self.data[key] = {NEWLINE            'value': value,NEWLINE            'ts': time.time()NEWLINE        }NEWLINENEWLINE    def __repr__(self):NEWLINE        return repr(self.to_dict_without_ts())NEWLINENEWLINE    def to_dict_without_ts(self):NEWLINE        return {NEWLINE            key: value['value'] for key, value in self.data.items()NEWLINE        }NEWLINENEWLINE    @propertyNEWLINE    def hash(self):NEWLINE        return sha256(json.dumps(self.data).encode()).digest()NEWLINENEWLINE    def merge(self, other: dict):NEWLINE        for key, value in other.items():NEWLINE            if key in self.data and value['ts'] < self.data[key]['ts']:NEWLINE                continueNEWLINE            self.data[key] = valueNEWLINENEWLINENEWLINEclass Wallet:NEWLINE    """ The primary role of Wallet is to encapsulate a collectionNEWLINE    of accounts (seed/private keys) and the spending rules / settingsNEWLINE    for the coins attached to those accounts. Wallets are representedNEWLINE    by physical files on the filesystem.NEWLINE    """NEWLINENEWLINE    preferences: TimestampedPreferencesNEWLINENEWLINE    def __init__(self, name: str = 'Wallet', accounts: MutableSequence['baseaccount.BaseAccount'] = None,NEWLINE                 storage: 'WalletStorage' = None, preferences: dict = None) -> None:NEWLINE        self.name = nameNEWLINE        self.accounts = accounts or []NEWLINE        self.storage = storage or WalletStorage()NEWLINE        self.preferences = TimestampedPreferences(preferences or {})NEWLINENEWLINE    @propertyNEWLINE    def id(self):NEWLINE        if self.storage.path:NEWLINE            return os.path.basename(self.storage.path)NEWLINE        return self.nameNEWLINENEWLINE    def add_account(self, account: 'baseaccount.BaseAccount'):NEWLINE        self.accounts.append(account)NEWLINENEWLINE    def generate_account(self, ledger: 'baseledger.BaseLedger') -> 'baseaccount.BaseAccount':NEWLINE        return ledger.account_class.generate(ledger, self)NEWLINENEWLINE    @propertyNEWLINE    def default_account(self) -> Optional['baseaccount.BaseAccount']:NEWLINE        for account in self.accounts:NEWLINE            return accountNEWLINE        return NoneNEWLINENEWLINE    def get_account_or_default(self, account_id: str) -> Optional['baseaccount.BaseAccount']:NEWLINE        if account_id is None:NEWLINE            return self.default_accountNEWLINE        return self.get_account_or_error(account_id)NEWLINENEWLINE    def get_account_or_error(self, account_id: str) -> 'baseaccount.BaseAccount':NEWLINE        for account in self.accounts:NEWLINE            if account.id == account_id:NEWLINE                return accountNEWLINE        raise ValueError(f"Couldn't find account: {account_id}.")NEWLINENEWLINE    def get_accounts_or_all(self, account_ids: List[str]) -> Sequence['baseaccount.BaseAccount']:NEWLINE        return [NEWLINE            self.get_account_or_error(account_id)NEWLINE            for account_id in account_idsNEWLINE        ] if account_ids else self.accountsNEWLINENEWLINE    async def get_detailed_accounts(self, **kwargs):NEWLINE        ledgers = {}NEWLINE        for i, account in enumerate(self.accounts):NEWLINE            details = await account.get_details(**kwargs)NEWLINE            details['is_default'] = i == 0NEWLINE            ledger_id = 
account.ledger.get_id()NEWLINE ledgers.setdefault(ledger_id, [])NEWLINE ledgers[ledger_id].append(details)NEWLINE return ledgersNEWLINENEWLINE @classmethodNEWLINE def from_storage(cls, storage: 'WalletStorage', manager: 'basemanager.BaseWalletManager') -> 'Wallet':NEWLINE json_dict = storage.read()NEWLINE wallet = cls(NEWLINE name=json_dict.get('name', 'Wallet'),NEWLINE preferences=json_dict.get('preferences', {}),NEWLINE storage=storageNEWLINE )NEWLINE account_dicts: Sequence[dict] = json_dict.get('accounts', [])NEWLINE for account_dict in account_dicts:NEWLINE ledger = manager.get_or_create_ledger(account_dict['ledger'])NEWLINE ledger.account_class.from_dict(ledger, wallet, account_dict)NEWLINE return walletNEWLINENEWLINE def to_dict(self):NEWLINE return {NEWLINE 'version': WalletStorage.LATEST_VERSION,NEWLINE 'name': self.name,NEWLINE 'preferences': self.preferences.data,NEWLINE 'accounts': [a.to_dict() for a in self.accounts]NEWLINE }NEWLINENEWLINE def save(self):NEWLINE self.storage.write(self.to_dict())NEWLINENEWLINE @propertyNEWLINE def hash(self) -> bytes:NEWLINE h = sha256()NEWLINE h.update(self.preferences.hash)NEWLINE for account in sorted(self.accounts, key=attrgetter('id')):NEWLINE h.update(account.hash)NEWLINE return h.digest()NEWLINENEWLINE def pack(self, password):NEWLINE new_data = json.dumps(self.to_dict())NEWLINE new_data_compressed = zlib.compress(new_data.encode())NEWLINE return better_aes_encrypt(password, new_data_compressed)NEWLINENEWLINE @classmethodNEWLINE def unpack(cls, password, encrypted):NEWLINE decrypted = better_aes_decrypt(password, encrypted)NEWLINE decompressed = zlib.decompress(decrypted)NEWLINE return json.loads(decompressed)NEWLINENEWLINE def merge(self, manager: 'basemanager.BaseWalletManager',NEWLINE password: str, data: str) -> List['baseaccount.BaseAccount']:NEWLINE added_accounts = []NEWLINE decrypted_data = self.unpack(password, data)NEWLINE self.preferences.merge(decrypted_data.get('preferences', {}))NEWLINE for account_dict in decrypted_data['accounts']:NEWLINE ledger = manager.get_or_create_ledger(account_dict['ledger'])NEWLINE _, _, pubkey = ledger.account_class.keys_from_dict(ledger, account_dict)NEWLINE account_id = pubkey.addressNEWLINE local_match = NoneNEWLINE for local_account in self.accounts:NEWLINE if account_id == local_account.id:NEWLINE local_match = local_accountNEWLINE breakNEWLINE if local_match is not None:NEWLINE local_match.merge(account_dict)NEWLINE else:NEWLINE new_account = ledger.account_class.from_dict(ledger, self, account_dict)NEWLINE added_accounts.append(new_account)NEWLINE return added_accountsNEWLINENEWLINE @propertyNEWLINE def is_locked(self) -> bool:NEWLINE for account in self.accounts:NEWLINE if account.encrypted:NEWLINE return TrueNEWLINE return FalseNEWLINENEWLINE def unlock(self, password):NEWLINE for account in self.accounts:NEWLINE if account.encrypted:NEWLINE account.decrypt(password)NEWLINE return TrueNEWLINENEWLINE def lock(self):NEWLINE for account in self.accounts:NEWLINE if not account.encrypted:NEWLINE assert account.password is not None, "account was never encrypted"NEWLINE account.encrypt(account.password)NEWLINE return TrueNEWLINENEWLINE @propertyNEWLINE def is_encrypted(self) -> bool:NEWLINE for account in self.accounts:NEWLINE if account.serialize_encrypted:NEWLINE return TrueNEWLINE return FalseNEWLINENEWLINE def decrypt(self):NEWLINE for account in self.accounts:NEWLINE account.serialize_encrypted = FalseNEWLINE self.save()NEWLINE return TrueNEWLINENEWLINE def encrypt(self, password):NEWLINE 
for account in self.accounts:NEWLINE            if not account.encrypted:NEWLINE                account.encrypt(password)NEWLINE            account.serialize_encrypted = TrueNEWLINE        self.save()NEWLINE        self.unlock(password)NEWLINE        return TrueNEWLINENEWLINENEWLINEclass WalletStorage:NEWLINENEWLINE    LATEST_VERSION = 1NEWLINENEWLINE    def __init__(self, path=None, default=None):NEWLINE        self.path = pathNEWLINE        self._default = default or {NEWLINE            'version': self.LATEST_VERSION,NEWLINE            'name': 'My Wallet',NEWLINE            'preferences': {},NEWLINE            'accounts': []NEWLINE        }NEWLINENEWLINE    def read(self):NEWLINE        if self.path and os.path.exists(self.path):NEWLINE            with open(self.path, 'r') as f:NEWLINE                json_data = f.read()NEWLINE                json_dict = json.loads(json_data)NEWLINE                if json_dict.get('version') == self.LATEST_VERSION and \NEWLINE                        set(json_dict) == set(self._default):NEWLINE                    return json_dictNEWLINE                else:NEWLINE                    return self.upgrade(json_dict)NEWLINE        else:NEWLINE            return self._default.copy()NEWLINENEWLINE    def upgrade(self, json_dict):NEWLINE        json_dict = json_dict.copy()NEWLINE        version = json_dict.pop('version', -1)NEWLINE        if version == -1:NEWLINE            # No version tag: treat as the oldest known format and fallNEWLINE            # through to merging over the defaults below.NEWLINE            passNEWLINE        # Merge the stored values over the defaults so missing keys areNEWLINE        # filled in, and return the merged dict.NEWLINE        upgraded = self._default.copy()NEWLINE        upgraded.update(json_dict)NEWLINE        return upgradedNEWLINENEWLINE    def write(self, json_dict):NEWLINENEWLINE        json_data = json.dumps(json_dict, indent=4, sort_keys=True)NEWLINE        if self.path is None:NEWLINE            return json_dataNEWLINENEWLINE        temp_path = "%s.tmp.%s" % (self.path, os.getpid())NEWLINE        with open(temp_path, "w") as f:NEWLINE            f.write(json_data)NEWLINE            f.flush()NEWLINE            os.fsync(f.fileno())NEWLINENEWLINE        if os.path.exists(self.path):NEWLINE            mode = os.stat(self.path).st_modeNEWLINE        else:NEWLINE            mode = stat.S_IREAD | stat.S_IWRITENEWLINE        try:NEWLINE            os.rename(temp_path, self.path)NEWLINE        except Exception: # pylint: disable=broad-exceptNEWLINE            os.remove(self.path)NEWLINE            os.rename(temp_path, self.path)NEWLINE        os.chmod(self.path, mode)NEWLINE
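NEWLINENEWLINE# A minimal round-trip sketch (assumption: run from a writable scratchNEWLINE# directory; 'wallet_test.json' is a hypothetical path). write() is atomic:NEWLINE# it writes a temp file, fsyncs, then renames it over the target, preservingNEWLINE# the original file mode.NEWLINEstorage = WalletStorage(path='wallet_test.json')NEWLINEwallet = Wallet(name='Test Wallet', storage=storage)NEWLINEwallet.save()NEWLINENEWLINEreloaded = storage.read()NEWLINEassert reloaded['name'] == 'Test Wallet'NEWLINEassert reloaded['version'] == WalletStorage.LATEST_VERSIONNEWLINE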
import sys
from collections import namedtuple, OrderedDict
from typing import Dict, List

import torch
from torch import nn as nn

from model.decoder import Decoder
from model.encoder import Encoder
from utils import util, nn_util
from utils.ast import AbstractSyntaxTree
from utils.dataset import Example
from utils.vocab import Vocab, SAME_VARIABLE_TOKEN, END_OF_VARIABLE_TOKEN


class RecurrentSubtokenDecoder(Decoder):
    def __init__(self, variable_encoding_size: int, hidden_size: int, dropout: float, tie_embed: bool, input_feed: bool, vocab: Vocab):
        super(Decoder, self).__init__()

        self.vocab = vocab

        lstm_x_dim = variable_encoding_size + hidden_size
        if input_feed:
            lstm_x_dim += hidden_size

        self.lstm_cell = nn.LSTMCell(lstm_x_dim, hidden_size)  # v_encoding_t + e(y_tm1)
        self.state2names = nn.Linear(hidden_size, len(vocab.target), bias=True)
        if not tie_embed:
            self.var_name_embed = nn.Embedding(len(vocab.target), hidden_size)

        self.dropout = nn.Dropout(dropout)
        self.config: Dict = None

        self.Hypothesis = namedtuple('Hypothesis', ['variable_list', 'variable_ptr', 'score'])

    @property
    def device(self):
        return self.state2names.weight.device

    @classmethod
    def default_params(cls):
        return {
            'vocab_file': None,
            'variable_encoding_size': 128,
            'hidden_size': 128,
            'input_feed': False,
            'tie_embedding': True,
            'dropout': 0.2,
            'beam_size': 5,
            'max_prediction_time_step': 1200,
            'independent_prediction_for_each_variable': False
        }

    @property
    def independent_prediction_for_each_variable(self):
        return self.config['independent_prediction_for_each_variable']

    @classmethod
    def build(cls, config):
        params = util.update(cls.default_params(), config)

        vocab = Vocab.load(params['vocab_file'])
        model = cls(params['variable_encoding_size'],
                    params['hidden_size'], params['dropout'], params['tie_embedding'], params['input_feed'], vocab)
        model.config = params

        return model

    def get_init_state(self, src_ast_encoding):
        # `self.encoder` is expected to be attached by the owning model
        return self.encoder.get_decoder_init_state(src_ast_encoding, self.config)

    def rnn_step(self, x, h_tm1, src_ast_encoding):
        h_t = self.lstm_cell(x, h_tm1)
        # TODO: implement attention?
        # att_t = torch.tanh(self.att_vec_linear(torch.cat([h_t], 1)))
        q_t = self.dropout(h_t[0])

        return h_t, q_t, None

    def forward(self, src_ast_encoding, prediction_target):
        # (batch_size, max_time_step)
        target_variable_encoding_indices = prediction_target['target_variable_encoding_indices']
        target_variable_encoding_indices_mask = prediction_target['target_variable_encoding_indices_mask']

        batch_size = target_variable_encoding_indices.size(0)
        variable_encoding_size = src_ast_encoding['variable_encoding'].size(-1)

        # (batch_size, max_time_step, encoding_size)
        # scatter variable encoding to sub-token time steps
        variable_encoding = torch.gather(src_ast_encoding['variable_encoding'], 1,
                                         target_variable_encoding_indices.unsqueeze(-1).expand(-1, -1, variable_encoding_size))
        # (batch_size, max_time_step)
        variable_tgt_name_id = prediction_target['variable_tgt_name_id']

        h_0 = self.get_init_state(src_ast_encoding)
        att_tm1 = variable_encoding.new_zeros(src_ast_encoding['batch_size'], self.lstm_cell.hidden_size)
        v_tm1_name_embed = torch.zeros(batch_size, self.lstm_cell.hidden_size, device=self.device)

        h_tm1 = h_0
        query_vecs = []
        max_time_step = variable_encoding.size(1)
        for t, variable_encoding_t in enumerate(variable_encoding.split(split_size=1, dim=1)):
            # variable_encoding_t: (batch_size, encoding_size)
            variable_encoding_t = variable_encoding_t.squeeze(1)

            if self.config['input_feed']:
                x = torch.cat([variable_encoding_t, v_tm1_name_embed, att_tm1], dim=-1)
            else:
                x = torch.cat([variable_encoding_t, v_tm1_name_embed], dim=-1)

            h_t, q_t, alpha_t = self.rnn_step(x, h_tm1, src_ast_encoding)

            att_tm1 = q_t
            h_tm1 = h_t
            query_vecs.append(q_t)
            v_tm1_name_id = variable_tgt_name_id[:, t]
            if self.config['tie_embedding']:
                # with tied embeddings, rows of the output projection
                # double as the input embedding table
                v_tm1_name_embed = self.state2names.weight[v_tm1_name_id]
            else:
                v_tm1_name_embed = self.var_name_embed(v_tm1_name_id)

            if self.independent_prediction_for_each_variable and t < max_time_step - 1:
                # zero out the recurrent state whenever the next time step
                # starts a new variable
                # (batch_size, )
                variable_ids_tp1 = target_variable_encoding_indices[:, t + 1]
                variable_ids_t = target_variable_encoding_indices[:, t]

                is_tp1_same_variable = torch.eq(variable_ids_tp1, variable_ids_t).float().unsqueeze(-1)  # TODO: check if correct!
                h_tm1 = (h_tm1[0] * is_tp1_same_variable, h_tm1[1] * is_tp1_same_variable)
                att_tm1 = att_tm1 * is_tp1_same_variable
                v_tm1_name_embed = v_tm1_name_embed * is_tp1_same_variable

        # (batch_size, max_prediction_node_num, encoding_size)
        query_vecs = torch.stack(query_vecs).permute(1, 0, 2)

        # (batch_size, max_prediction_node_num, vocab_size)
        logits = self.state2names(query_vecs)
        var_name_log_probs = torch.log_softmax(logits, dim=-1)
        var_name_log_probs = var_name_log_probs * target_variable_encoding_indices_mask.unsqueeze(-1)

        return var_name_log_probs

    def get_target_log_prob(self, var_name_log_probs, prediction_target, src_ast_encoding):
        # (batch_size, max_prediction_node_num)
        variable_tgt_name_id = prediction_target['variable_tgt_name_id']
        tgt_var_name_log_prob = torch.gather(var_name_log_probs,
                                             dim=-1, index=variable_tgt_name_id.unsqueeze(-1)).squeeze(-1)

        tgt_var_name_log_prob = tgt_var_name_log_prob * prediction_target['target_variable_encoding_indices_mask']

        result = dict(tgt_var_name_log_prob=tgt_var_name_log_prob)

        return result

    def predict(self, examples: List[Example], encoder: Encoder) -> List[Dict]:
        batch_size = len(examples)
        beam_size = self.config['beam_size']
        same_variable_id = self.vocab.target[SAME_VARIABLE_TOKEN]
        end_of_variable_id = self.vocab.target[END_OF_VARIABLE_TOKEN]

        variable_nums = []
        for ast_id, example in enumerate(examples):
            variable_nums.append(len(example.ast.variables))

        beams = OrderedDict((ast_id, [self.Hypothesis([], 0, 0.)]) for ast_id in range(batch_size))
        hyp_scores_tm1 = torch.zeros(len(beams), device=self.device)
        completed_hyps = [[] for _ in range(batch_size)]
        tgt_vocab_size = len(self.vocab.target)

        # `batcher` is assumed to be attached externally by the owning model
        tensor_dict = self.batcher.to_tensor_dict(examples)
        nn_util.to(tensor_dict, self.device)

        context_encoding = encoder(tensor_dict)
        h_tm1 = h_0 = self.get_init_state(context_encoding)

        # Note that we are using the `restoration_indices` from `context_encoding`, which is the word-level restoration index
        # (batch_size, variable_master_node_num, encoding_size)
        variable_encoding = context_encoding['variable_encoding']
        # (batch_size, encoding_size)
        variable_name_embed_tm1 = att_tm1 = torch.zeros(batch_size, self.lstm_cell.hidden_size, device=self.device)

        max_prediction_time_step = self.config['max_prediction_time_step']
        for t in range(max_prediction_time_step):
            # (total_live_hyp_num, encoding_size)
            if t > 0:
                variable_encoding_t = variable_encoding[hyp_ast_ids_t, hyp_variable_ptrs_t]
            else:
                variable_encoding_t = variable_encoding[:, 0]

            if self.config['input_feed']:
                x = torch.cat([variable_encoding_t, variable_name_embed_tm1, att_tm1], dim=-1)
            else:
                x = torch.cat([variable_encoding_t, variable_name_embed_tm1], dim=-1)

            h_t, q_t, alpha_t = self.rnn_step(x, h_tm1, context_encoding)

            # (total_live_hyp_num, vocab_size)
            hyp_var_name_scores_t = torch.log_softmax(self.state2names(q_t), dim=-1)

            cont_cand_hyp_scores = hyp_scores_tm1.unsqueeze(-1) + hyp_var_name_scores_t

            new_beams = OrderedDict()
            live_beam_ids = []
            new_hyp_scores = []
            live_prev_hyp_ids = []
            new_hyp_var_name_ids = []
            new_hyp_ast_ids = []
            new_hyp_variable_ptrs = []
            is_same_variable_mask = []
            beam_start_hyp_pos = 0
            for beam_id, (ast_id, beam) in enumerate(beams.items()):
                beam_end_hyp_pos = beam_start_hyp_pos + len(beam)
                # (live_beam_size, vocab_size)
                beam_cont_cand_hyp_scores = cont_cand_hyp_scores[beam_start_hyp_pos: beam_end_hyp_pos]
                cont_beam_size = beam_size - len(completed_hyps[ast_id])
                beam_new_hyp_scores, beam_new_hyp_positions = torch.topk(beam_cont_cand_hyp_scores.view(-1),
                                                                         k=cont_beam_size,
                                                                         dim=-1)

                # (cont_beam_size)
                # integer division recovers the source hypothesis; the
                # remainder is the new sub-token id
                beam_prev_hyp_ids = beam_new_hyp_positions // tgt_vocab_size
                beam_hyp_var_name_ids = beam_new_hyp_positions % tgt_vocab_size

                _prev_hyp_ids = beam_prev_hyp_ids.cpu()
                _hyp_var_name_ids = beam_hyp_var_name_ids.cpu()
                _new_hyp_scores = beam_new_hyp_scores.cpu()

                for i in range(cont_beam_size):
                    prev_hyp_id = _prev_hyp_ids[i].item()
                    prev_hyp = beam[prev_hyp_id]
                    hyp_var_name_id = _hyp_var_name_ids[i].item()
                    new_hyp_score = _new_hyp_scores[i].item()

                    variable_ptr = prev_hyp.variable_ptr
                    if hyp_var_name_id == end_of_variable_id:
                        # remove empty cases
                        if len(prev_hyp.variable_list) == 0 or prev_hyp.variable_list[-1] == end_of_variable_id:
                            continue

                        variable_ptr += 1

                    new_hyp = self.Hypothesis(variable_list=list(prev_hyp.variable_list) + [hyp_var_name_id],
                                              variable_ptr=variable_ptr,
                                              score=new_hyp_score)

                    if variable_ptr == variable_nums[ast_id]:
                        completed_hyps[ast_id].append(new_hyp)
                    else:
                        new_beams.setdefault(ast_id, []).append(new_hyp)
                        live_beam_ids.append(beam_id)
                        new_hyp_scores.append(new_hyp_score)
                        live_prev_hyp_ids.append(beam_start_hyp_pos + prev_hyp_id)
                        new_hyp_var_name_ids.append(hyp_var_name_id)
                        new_hyp_ast_ids.append(ast_id)
                        new_hyp_variable_ptrs.append(variable_ptr)
                        is_same_variable_mask.append(1. if prev_hyp.variable_ptr == variable_ptr else 0.)

                beam_start_hyp_pos = beam_end_hyp_pos

            if live_beam_ids:
                hyp_scores_tm1 = torch.tensor(new_hyp_scores, device=self.device)
                h_tm1 = (h_t[0][live_prev_hyp_ids], h_t[1][live_prev_hyp_ids])
                att_tm1 = q_t[live_prev_hyp_ids]

                # rows of the tied output layer serve as name embeddings
                variable_name_embed_tm1 = self.state2names.weight[new_hyp_var_name_ids]
                hyp_ast_ids_t = new_hyp_ast_ids
                hyp_variable_ptrs_t = new_hyp_variable_ptrs

                beams = new_beams

                if self.independent_prediction_for_each_variable:
                    is_same_variable_mask = torch.tensor(is_same_variable_mask, device=self.device, dtype=torch.float).unsqueeze(-1)
                    h_tm1 = (h_tm1[0] * is_same_variable_mask, h_tm1[1] * is_same_variable_mask)
                    att_tm1 = att_tm1 * is_same_variable_mask
                    variable_name_embed_tm1 = variable_name_embed_tm1 * is_same_variable_mask
            else:
                break

        variable_rename_results = []
        for i, hyps in enumerate(completed_hyps):
            variable_rename_result = dict()
            ast = examples[i].ast
            hyps = sorted(hyps, key=lambda hyp: -hyp.score)

            if not hyps:
                # return identity renamings
                print(f'Failed to find a hypothesis for function {ast.compilation_unit}', file=sys.stderr)
                for old_name in ast.variables:
                    variable_rename_result[old_name] = {'new_name': old_name,
                                                        'prob': 0.}
            else:
                top_hyp = hyps[0]
                sub_token_ptr = 0
                for old_name in ast.variables:
                    sub_token_begin = sub_token_ptr
                    while top_hyp.variable_list[sub_token_ptr] != end_of_variable_id:
                        sub_token_ptr += 1
                    sub_token_ptr += 1  # point to first sub-token of next variable
                    sub_token_end = sub_token_ptr

                    var_name_token_ids = top_hyp.variable_list[sub_token_begin: sub_token_end]  # include ending </s>
                    if var_name_token_ids == [same_variable_id, end_of_variable_id]:
                        new_var_name = old_name
                    else:
                        new_var_name = self.vocab.target.subtoken_model.decode_ids(var_name_token_ids)

                    variable_rename_result[old_name] = {'new_name': new_var_name,
                                                        'prob': top_hyp.score}

            variable_rename_results.append(variable_rename_result)

        return variable_rename_results
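The flattened top-k trick in the beam search above (floor division and remainder by the vocabulary size) is easy to sanity-check in isolation; a minimal self-contained sketch with made-up sizes:

import torch

# Hypothetical sizes: 3 live hypotheses, vocabulary of 7 sub-tokens.
vocab_size = 7
hyp_scores = torch.tensor([0.0, -1.0, -2.0]).unsqueeze(-1)  # (3, 1)
token_scores = torch.randn(3, vocab_size)                   # (3, 7)

# Flatten all (hypothesis, token) pairs and take the best 3 joint scores.
cand = (hyp_scores + token_scores).view(-1)                 # (21,)
top_scores, top_pos = torch.topk(cand, k=3)

prev_hyp_ids = top_pos // vocab_size  # which hypothesis each pick extends
token_ids = top_pos % vocab_size      # which sub-token it appends
assert ((prev_hyp_ids * vocab_size + token_ids) == top_pos).all()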
from bingads.v13.bulk.entities.audiences.bulk_campaign_audience_association import BulkCampaignAudienceAssociation


class BulkCampaignProductAudienceAssociation(BulkCampaignAudienceAssociation):
    """ Represents a Campaign Product Audience Association that can be read or written in a bulk file.

    For more information, see Campaign Product Audience Association at https://go.microsoft.com/fwlink/?linkid=846127.

    *See also:*

    * :class:`.BulkServiceManager`
    * :class:`.BulkOperation`
    * :class:`.BulkFileReader`
    * :class:`.BulkFileWriter`
    """
import numpy as np
import torch
import torch.nn.functional as F
import torchvision.transforms as transforms
from torch.utils.data import DataLoader, Dataset
# Retained for the commented-out convex-envelope variant below.
from skimage import measure
from skimage.morphology import convex_hull_image


class SuperPixelGet(Dataset):  # inherits from Dataset
    def __init__(self, segments_label, segments_tensor, g_theta_m, data_num):
        self.segments_label = segments_label.cuda()
        self.segments_tensor = segments_tensor.cuda()
        self.g_theta_m = g_theta_m.cuda()
        self.data_num = data_num

        self.zero_layer = torch.zeros_like(self.segments_tensor)
        self.one_layer = torch.ones_like(self.segments_tensor)

    def __len__(self):
        return self.data_num

    def __getitem__(self, index):
        # Sample a binary attack region: each superpixel is switched on
        # independently with probability g_theta_m.
        attack_region_tmp = self.zero_layer.clone()
        flag = torch.rand_like(self.segments_label) < self.g_theta_m
        for i in range(flag.shape[0]):
            if flag[i]:
                sp = self.segments_label[i]
                attack_region_tmp = torch.where(self.segments_tensor == sp, self.one_layer, attack_region_tmp)

        # # get convex envelope
        # attack_region_tmp_np = attack_region_tmp.cpu().numpy()
        # attack_region_tmp_label_np = measure.label(attack_region_tmp_np)
        # connect_region_number = int(np.max(attack_region_tmp_label_np))

        # one_np = np.ones_like(attack_region_tmp_np)
        # zero_np = np.zeros_like(attack_region_tmp_np)
        # attack_region_envolope_np = np.zeros_like(attack_region_tmp_np)

        # for i in range(connect_region_number):
        #     binary_map = np.where(attack_region_tmp_label_np == i + 1, one_np, zero_np)
        #     convex_env = convex_hull_image(binary_map)

        #     attack_region_envolope_np = attack_region_envolope_np + convex_env

        # attack_region_tmp = torch.from_numpy(attack_region_envolope_np)
        # attack_region_tmp = torch.clamp(attack_region_tmp, 0, 1).cuda()

        return attack_region_tmp, flag


if __name__ == '__main__':
    segments_tensor = [
        [0,0,1,1,1,2,2,2,3,3,4,4,5,5,5,0,0],
        [0,0,1,1,1,2,2,2,3,3,4,4,4,5,5,0,0],
        [0,0,1,1,1,2,3,3,3,3,4,4,4,5,5,0,0],
        [0,0,1,1,1,2,2,2,3,3,4,4,6,6,5,0,0],
        [0,0,1,1,1,2,2,2,3,3,4,4,6,6,5,0,0],
        [0,0,1,1,1,2,2,2,3,3,4,4,6,6,5,0,0],
        [0,0,1,1,1,2,2,2,3,3,4,4,6,6,5,0,0],
        [0,0,1,1,1,2,2,2,3,3,4,4,6,6,5,0,0],
        [0,0,1,1,1,2,2,2,3,3,4,4,6,6,5,0,0],
        [0,0,1,1,1,2,2,2,3,3,4,4,6,6,5,0,0],
        [0,0,1,1,1,2,2,2,3,3,4,4,6,6,5,0,0],
        [0,0,1,1,1,2,2,2,3,3,4,4,6,6,5,0,0],
    ]
    segments_tensor = torch.Tensor(segments_tensor)
    g_theta_m = torch.Tensor([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])
    data_num = 555
    data = SuperPixelGet(torch.Tensor([1, 2, 3, 4, 5, 6]), segments_tensor, g_theta_m, data_num)
    dataloader = DataLoader(data, batch_size=128, shuffle=False)  # load batches with DataLoader

    for epoch in range(10):
        for i_batch, batch_data in enumerate(dataloader):
            # Visualise how often each pixel is selected across the batch.
            sum_tensor = torch.sum(batch_data, dim=0)
            sum_tensor = sum_tensor / torch.max(sum_tensor)
            sum_tensor = sum_tensor.unsqueeze(0).unsqueeze(0)
            sum_tensor = F.interpolate(sum_tensor, (800, 800), mode='nearest').squeeze()
            sum_pil = transforms.ToPILImage()(sum_tensor)
            sum_pil.show()
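The per-superpixel masking at the heart of __getitem__ does not need a GPU; a minimal CPU-only sketch of the same sampling step, with toy labels and probabilities:

import torch

# Toy 4x4 over-segmentation with three superpixels labelled 1..3.
segments = torch.tensor([[1., 1., 2., 2.],
                         [1., 1., 2., 2.],
                         [3., 3., 2., 2.],
                         [3., 3., 2., 2.]])
labels = torch.tensor([1., 2., 3.])
probs = torch.tensor([0.9, 0.1, 0.5])   # per-superpixel keep probability

region = torch.zeros_like(segments)
flag = torch.rand_like(labels) < probs  # one Bernoulli draw per superpixel
for keep, sp in zip(flag, labels):
    if keep:
        region = torch.where(segments == sp, torch.ones_like(segments), region)
print(region)  # union of the selected superpixels as a binary mask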
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron.objects.qos import policy as qos_policy
from neutron.objects.qos import rule as qos_rule
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants
from neutron_lib import context as n_context
from neutron_lib.db import constants as db_consts
from neutron_lib.plugins import directory
from neutron_lib.services.qos import base
from neutron_lib.services.qos import constants as qos_consts
from oslo_config import cfg
from oslo_log import log as logging

from networking_ovn.common import utils

LOG = logging.getLogger(__name__)

OVN_QOS = 'qos'
SUPPORTED_RULES = {
    qos_consts.RULE_TYPE_BANDWIDTH_LIMIT: {
        qos_consts.MAX_KBPS: {
            'type:range': [0, db_consts.DB_INTEGER_MAX_VALUE]},
        qos_consts.MAX_BURST: {
            'type:range': [0, db_consts.DB_INTEGER_MAX_VALUE]},
        qos_consts.DIRECTION: {
            'type:values': [constants.EGRESS_DIRECTION]}
    },
}

VIF_TYPES = [portbindings.VIF_TYPE_OVS, portbindings.VIF_TYPE_VHOST_USER]
VNIC_TYPES = [portbindings.VNIC_NORMAL]


class OVNQosNotificationDriver(base.DriverBase):
    """OVN notification driver for QoS."""

    def __init__(self, name='OVNQosDriver',
                 vif_types=VIF_TYPES,
                 vnic_types=VNIC_TYPES,
                 supported_rules=SUPPORTED_RULES,
                 requires_rpc_notifications=False):
        super(OVNQosNotificationDriver, self).__init__(
            name, vif_types, vnic_types, supported_rules,
            requires_rpc_notifications)

    @classmethod
    def create(cls, plugin_driver):
        cls._driver = plugin_driver
        return cls()

    @property
    def is_loaded(self):
        return OVN_QOS in cfg.CONF.ml2.extension_drivers

    def create_policy(self, context, policy):
        # No need to update OVN on create
        pass

    def update_policy(self, context, policy):
        # Call into OVN client to update the policy
        self._driver._ovn_client._qos_driver.update_policy(context, policy)

    def delete_policy(self, context, policy):
        # No need to update OVN on delete
        pass


class OVNQosDriver(object):
    """Qos driver for OVN"""

    def __init__(self, driver):
        LOG.info("Starting OVNQosDriver")
        super(OVNQosDriver, self).__init__()
        self._driver = driver
        self._plugin_property = None

    @property
    def _plugin(self):
        if self._plugin_property is None:
            self._plugin_property = directory.get_plugin()
        return self._plugin_property

    def _generate_port_options(self, context, policy_id):
        if policy_id is None:
            return {}
        options = {}
        # The policy might not have any rules
        all_rules = qos_rule.get_rules(qos_policy.QosPolicy,
                                       context, policy_id)
        for rule in all_rules:
            if isinstance(rule, qos_rule.QosBandwidthLimitRule):
                if rule.max_kbps:
                    options['qos_max_rate'] = str(rule.max_kbps * 1000)
                if rule.max_burst_kbps:
                    options['qos_burst'] = str(rule.max_burst_kbps * 1000)
        return options

    def get_qos_options(self, port):
        # Is qos service enabled
        if 'qos_policy_id' not in port:
            return {}
        # Don't apply qos rules to network devices
        if utils.is_network_device_port(port):
            return {}

        # Determine if port or network policy should be used
        context = n_context.get_admin_context()
        port_policy_id = port.get('qos_policy_id')
        network_policy_id = None
        if not port_policy_id:
            network_policy = qos_policy.QosPolicy.get_network_policy(
                context, port['network_id'])
            network_policy_id = network_policy.id if network_policy else None

        # Generate qos options for the selected policy
        policy_id = port_policy_id or network_policy_id
        return self._generate_port_options(context, policy_id)

    def _update_network_ports(self, context, network_id, options):
        # Retrieve all ports for this network
        ports = self._plugin.get_ports(context,
                                       filters={'network_id': [network_id]})
        for port in ports:
            # Don't apply qos rules if port has a policy
            port_policy_id = port.get('qos_policy_id')
            if port_policy_id:
                continue
            # Don't apply qos rules to network devices
            if utils.is_network_device_port(port):
                continue
            # Call into OVN client to update port
            self._driver.update_port(port, qos_options=options)

    def update_network(self, network):
        # Is qos service enabled
        if 'qos_policy_id' not in network:
            return

        # Update the qos options on each network port
        context = n_context.get_admin_context()
        options = self._generate_port_options(
            context, network['qos_policy_id'])
        self._update_network_ports(context, network.get('id'), options)

    def update_policy(self, context, policy):
        options = self._generate_port_options(context, policy.id)

        # Update each network bound to this policy
        network_bindings = policy.get_bound_networks()
        for network_id in network_bindings:
            self._update_network_ports(context, network_id, options)

        # Update each port bound to this policy
        port_bindings = policy.get_bound_ports()
        for port_id in port_bindings:
            port = self._plugin.get_port(context, port_id)
            self._driver.update_port(port, qos_options=options)
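The kbps-to-bps conversion in _generate_port_options can be mirrored without the Neutron objects; a minimal sketch using a hypothetical stand-in for the bandwidth-limit rule class:

from collections import namedtuple

# Hypothetical stand-in for qos_rule.QosBandwidthLimitRule.
FakeBandwidthLimitRule = namedtuple('FakeBandwidthLimitRule',
                                    ['max_kbps', 'max_burst_kbps'])

def port_options(rules):
    # Mirrors _generate_port_options: the options dict carries rates in
    # bits per second as strings, while the rules store kilobits per second.
    options = {}
    for rule in rules:
        if rule.max_kbps:
            options['qos_max_rate'] = str(rule.max_kbps * 1000)
        if rule.max_burst_kbps:
            options['qos_burst'] = str(rule.max_burst_kbps * 1000)
    return options

print(port_options([FakeBandwidthLimitRule(max_kbps=10000, max_burst_kbps=800)]))
# {'qos_max_rate': '10000000', 'qos_burst': '800000'}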
from discord.ext import commands
import discord
import requests
from bs4 import BeautifulSoup
import re


class neorg_cmds(commands.Cog):

    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def wiki(self, ctx, *, query):
        query = query.strip().lower().replace(' ', '-')
        neorg_wiki = {}
        wiki_url = "https://github.com/vhyrro/neorg/wiki"

        stuff = BeautifulSoup(requests.get(wiki_url).text, 'lxml')
        lis = stuff.find_all("div", {"class": "Box-body wiki-custom-sidebar markdown-body"})[0]

        for li in lis.find_all('li'):
            part = li.a['href']
            # strip the wiki base URL (first 37 characters) to key pages by slug
            neorg_wiki[part[37:].lower()] = part

        wiki = [neorg_wiki[k] for k in neorg_wiki.keys() if query in k.lower()]

        if not wiki:
            await ctx.send(embed=discord.Embed(description="No Results Found!", colour=0x4878BE))
            return

        for i in wiki:
            em = discord.Embed(description=i, colour=0x4878BE)
            await ctx.send(embed=em)

    @commands.command()
    async def spec(self, ctx, *, query):
        query = query.strip().lower().replace(' ', '-')
        url = "https://raw.githubusercontent.com/vhyrro/neorg/main/docs/NFF-0.1-spec.md"
        og_url = "https://github.com/vhyrro/neorg/blob/main/docs/NFF-0.1-spec.md"

        # pull markdown links out of the top of the spec (its table of contents)
        soup = re.findall(r"\[(.+)\]\((.+)\)", requests.get(url).text[:1500])
        neorg_specs = {}

        for k, v in soup:
            neorg_specs[k.lower().replace(' ', '-')] = og_url + v

        spec = [neorg_specs[k] for k in neorg_specs.keys() if query in k.lower()]

        if not spec:
            await ctx.send(embed=discord.Embed(description="No Results Found!", colour=0x4878BE))
            return

        for i in spec:
            em = discord.Embed(description=i, colour=0x4878BE)
            await ctx.send(embed=em)

    @commands.command(aliases=["norg"])
    async def neorg(self, ctx):
        """Fetch the Neorg repository"""
        await ctx.send("Neorg - https://github.com/vhyrro/neorg")


def setup(bot):
    bot.add_cog(neorg_cmds(bot))
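A minimal launcher sketch for the cog above, assuming discord.py 1.x (matching the synchronous setup() hook) and that the file is saved as neorg_cmds.py next to the launcher; the token env var is a placeholder:

import os

from discord.ext import commands

bot = commands.Bot(command_prefix='!')
bot.load_extension('neorg_cmds')       # calls setup(bot) in the module above
bot.run(os.environ['DISCORD_TOKEN'])   # hypothetical env var holding the bot token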
from django.db import models

tipo_choices = (
    ('colegio', 'Colegio'),
    ('iglesia', 'Iglesia'),
    ('plaza', 'Plaza'),
    ('restaurante', 'Restaurante'),
    ('alojamiento', 'Alojamiento'),
)


class admin_agregar(models.Model):
    cod_ubicacion = models.AutoField(primary_key=True)
    latitud = models.CharField(max_length=200, blank=False, null=False)
    longitud = models.CharField(max_length=200, blank=False, null=False)
    tipo = models.CharField(max_length=200, blank=False, null=False, choices=tipo_choices)

    def __str__(self):
        return self.tipo


class colegio(models.Model):
    cod_colegio = models.AutoField(primary_key=True)
    nombre = models.CharField(max_length=200, blank=False, null=False)
    fecha_fundacion = models.CharField(max_length=200, blank=False, null=False)
    clasificacion = models.CharField(max_length=200, blank=False)
    cod_ubicacion = models.ForeignKey(admin_agregar, on_delete=models.CASCADE)


class iglesia(models.Model):
    cod_iglesia = models.AutoField(primary_key=True)
    nombre = models.CharField(max_length=200, blank=False, null=False)
    fecha_fundacion = models.CharField(max_length=200, blank=False, null=False)
    religion = models.CharField(max_length=200, blank=False, null=False)
    capacidad = models.CharField(max_length=200, blank=False, null=False)
    cod_ubicacion = models.ForeignKey(admin_agregar, on_delete=models.CASCADE)


class alojamiento(models.Model):
    cod_alojamiento = models.AutoField(primary_key=True)
    nombre = models.CharField(max_length=200, blank=False, null=False)
    clasificacion = models.CharField(max_length=200, blank=False, null=False)
    fecha_fundacion = models.CharField(max_length=200, blank=False, null=False)
    cod_ubicacion = models.ForeignKey(admin_agregar, on_delete=models.CASCADE)


class plaza(models.Model):
    cod_plaza = models.AutoField(primary_key=True)
    nombre = models.CharField(max_length=200, blank=False, null=False)
    fecha_fundacion = models.DateField(blank=False, null=False)
    cod_ubicacion = models.OneToOneField(admin_agregar, on_delete=models.CASCADE)


class restaurante(models.Model):
    cod_restaurante = models.AutoField(primary_key=True)
    nombre = models.CharField(max_length=200, blank=False, null=False)
    capacidad = models.CharField(max_length=200, blank=False, null=False)
    clasificacion = models.CharField(max_length=200, blank=False, null=False)
    cod_ubicacion = models.ForeignKey(admin_agregar, on_delete=models.CASCADE)
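A minimal usage sketch for the models above, assuming they live in an installed Django app with migrations applied (run inside python manage.py shell); the app name and field values are hypothetical:

from datetime import date

from lugares.models import admin_agregar, colegio, plaza  # hypothetical app name

# A location record plus a school that references it via ForeignKey.
ubicacion = admin_agregar.objects.create(
    latitud='-33.4489', longitud='-70.6693', tipo='colegio')
escuela = colegio.objects.create(
    nombre='Colegio Central', fecha_fundacion='1950',
    clasificacion='particular', cod_ubicacion=ubicacion)

# admin_agregar.__str__ returns the location type.
assert str(ubicacion) == 'colegio'

# plaza uses a OneToOneField, so each location can host at most one plaza.
plaza.objects.create(nombre='Plaza Mayor', fecha_fundacion=date(1900, 1, 1),
                     cod_ubicacion=ubicacion)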
