content (stringlengths 7–928k) | avg_line_length (float64 3.5–33.8k) | max_line_length (int64 6–139k) | alphanum_fraction (float64 0.08–0.96) | licenses (sequence) | repository_name (stringlengths 7–104) | path (stringlengths 4–230) | size (int64 7–928k) | lang (stringclasses 1 value) |
---|---|---|---|---|---|---|---|---|
#!/usr/bin/env python3
import io
import unittest.mock
import yaz
class ConfigurationPlugin(yaz.Plugin):
"""This is the documentation string for the ConfigurationPlugin"""
choices = {
"yes": True,
"no": False,
"unknown": None,
}
@yaz.task(choice__choices=["yes", "no", "unknown"])
def required_choice(self, choice):
"""This is the documentation for the required_choice task"""
return self.choices[choice]
@yaz.task
def one_line_doc_string(self):
"""This is the documentation for the one_line_doc_string task"""
pass
@yaz.task
def multi_line_doc_string(self):
"""
This is the documentation for the multi_line_doc_string task
This is the long description, for example:
bla bla,
etc...
"""
pass
@yaz.task(choice__help="This is the documentation for the choice parameter of the parameter_help task")
def parameter_help(self, choice):
"""This is the documentation for the parameter_help task"""
pass
class Test(yaz.TestCase):
def test_010_plugin_help(self):
"""Should show plugin help texts from docstring or configuration"""
caller = self.get_caller([ConfigurationPlugin])
with unittest.mock.patch("sys.stdout", new=io.StringIO()) as stdout:
with self.assertRaises(SystemExit):
caller("--help")
stdout.seek(0)
output = stdout.read()
print(output)
self.assertRegex(output, r"This is the documentation string for the ConfigurationPlugin")
self.assertRegex(output, r"This is the documentation for the required_choice task")
self.assertRegex(output, r"This is the documentation for the one_line_doc_string task")
self.assertRegex(output, r"This is the documentation for the parameter_help task")
# we expect the first line of the multi_line_doc_string task's docstring, not the rest
self.assertRegex(output, r"This is the documentation for the multi_line_doc_string task")
self.assertNotRegex(output, r"This is the long description, for example")
def test_020_task_help__docstring(self):
"""Should show task help texts from docstring or configuration"""
caller = self.get_caller([ConfigurationPlugin])
with unittest.mock.patch("sys.stdout", new=io.StringIO()) as stdout:
with self.assertRaises(SystemExit):
caller("multi-line-doc-string", "--help")
stdout.seek(0)
output = stdout.read()
print(output)
self.assertNotRegex(output, r"This is the documentation string for the ConfigurationPlugin")
self.assertRegex(output, r"This is the documentation for the multi_line_doc_string task")
self.assertRegex(output, r"This is the long description, for example")
def test_030_task_help__parameter(self):
"""Should show task help texts from docstring or configuration"""
caller = self.get_caller([ConfigurationPlugin])
with unittest.mock.patch("sys.stdout", new=io.StringIO()) as stdout:
with self.assertRaises(SystemExit):
caller("parameter-help", "--help")
stdout.seek(0)
output = stdout.read()
print(output)
self.assertNotRegex(output, r"This is the documentation string for the ConfigurationPlugin")
self.assertRegex(output, r"This is the documentation for the parameter_help task")
self.assertRegex(output, r"This is the documentation for the choice parameter of the\n.*parameter_help task")
def test_040_choices_configuration(self):
"""Should accept predefined choices"""
caller = self.get_caller([ConfigurationPlugin])
# using available choice
self.assertTrue(caller("required-choice", "yes"))
# using unavailable choice
with unittest.mock.patch("sys.stderr", new=io.StringIO()):
with self.assertRaises(SystemExit):
caller("required-choice", "unavailable")
if __name__ == "__main__":
yaz.main()
| 35.612069 | 117 | 0.661825 | ["MIT"] | boudewijn-zicht/yaz | yaz/test/test_task_configuration.py | 4,131 | Python |
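The row above is a yaz test suite. For orientation, a minimal plugin in the same style might look roughly like this; it is a sketch that assumes only the APIs the test already uses (`yaz.Plugin`, `@yaz.task` with `param__help` keywords, `yaz.main()`), and the `Greeter`/`hello` names and the default argument are illustrative.

```python
# Minimal yaz plugin sketch; Greeter/hello and the default value are illustrative.
# Only APIs already used in the test file above are assumed.
import yaz


class Greeter(yaz.Plugin):
    """Greet someone from the command line"""

    @yaz.task(name__help="Name of the person to greet")
    def hello(self, name="world"):
        """Print a friendly greeting"""
        return "Hello, {}!".format(name)


if __name__ == "__main__":
    yaz.main()
```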
import unittest
from malcolm.modules.builtin.vmetas import StringMeta
class TestValidate(unittest.TestCase):
def setUp(self):
self.string_meta = StringMeta("test string description")
def test_given_value_str_then_return(self):
response = self.string_meta.validate("TestValue")
assert "TestValue" == response
def test_given_value_int_then_cast_and_return(self):
response = self.string_meta.validate(15)
assert "15" == response
def test_given_value_float_then_cast_and_return(self):
response = self.string_meta.validate(12.8)
assert "12.8" == response
def test_given_value_None_then_return(self):
response = self.string_meta.validate(None)
assert "" == response
| 25.466667 | 64 | 0.708115 | ["Apache-2.0"] | MattTaylorDLS/pymalcolm | tests/test_modules/test_builtin/test_stringmeta.py | 764 | Python |
# -*- coding: utf-8 -*-
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
two-qubit ZZ-rotation gate.
"""
from qiskit import CompositeGate
from qiskit import Gate
from qiskit import QuantumCircuit
from qiskit._instructionset import InstructionSet
from qiskit._quantumregister import QuantumRegister
from qiskit.extensions.standard import header # pylint: disable=unused-import
class RZZGate(Gate):
"""Two-qubit ZZ-rotation gate."""
def __init__(self, theta, ctl, tgt, circ=None):
"""Create new rzz gate."""
super().__init__("rzz", [theta], [ctl, tgt], circ)
def qasm(self):
"""Return OPENQASM string."""
ctl = self.arg[0]
tgt = self.arg[1]
theta = self.param[0]
return self._qasmif("rzz(%s) %s[%d],%s[%d];" % (theta,
ctl[0].name, ctl[1],
tgt[0].name, tgt[1]))
def inverse(self):
"""Invert this gate."""
self.param[0] = -self.param[0]
return self
def reapply(self, circ):
"""Reapply this gate to corresponding qubits in circ."""
self._modifiers(circ.rzz(self.param[0], self.arg[0], self.arg[1]))
def rzz(self, theta, ctl, tgt):
"""Apply RZZ to circuit."""
if isinstance(ctl, QuantumRegister) and \
isinstance(tgt, QuantumRegister) and len(ctl) == len(tgt):
instructions = InstructionSet()
for i in range(ctl.size):
instructions.add(self.rzz(theta, (ctl, i), (tgt, i)))
return instructions
self._check_qubit(ctl)
self._check_qubit(tgt)
self._check_dups([ctl, tgt])
return self._attach(RZZGate(theta, ctl, tgt, self))
# Add to QuantumCircuit and CompositeGate classes
QuantumCircuit.rzz = rzz
CompositeGate.rzz = rzz
| 33.630137 | 79 | 0.619552 | ["Apache-2.0"] | christians94/qiskit-sdk-py | qiskit/extensions/standard/rzz.py | 2,455 | Python |
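For background not stated in the file above: up to a global phase, a two-qubit ZZ rotation by angle $\theta$ is conventionally written as

$$R_{ZZ}(\theta) \;=\; e^{-i\frac{\theta}{2}\, Z\otimes Z} \;=\; \operatorname{diag}\!\big(e^{-i\theta/2},\; e^{i\theta/2},\; e^{i\theta/2},\; e^{-i\theta/2}\big),$$

a diagonal gate that is symmetric in its two qubits, which is why the register-broadcasting `rzz` helper above can pair `ctl` and `tgt` element-wise.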
import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from smoothot.projection import projection_simplex
def _projection_simplex(v, z=1):
"""
Old implementation for test and benchmark purposes.
The arguments v and z should be a vector and a scalar, respectively.
"""
n_features = v.shape[0]
u = np.sort(v)[::-1]
cssv = np.cumsum(u) - z
ind = np.arange(n_features) + 1
cond = u - cssv / ind > 0
rho = ind[cond][-1]
theta = cssv[cond][-1] / float(rho)
w = np.maximum(v - theta, 0)
return w
def test_projection_simplex():
rng = np.random.RandomState(0)
V = rng.rand(100, 10)
# Axis = None case.
w = projection_simplex(V[0], z=1, axis=None)
w2 = _projection_simplex(V[0], z=1)
assert_array_almost_equal(w, w2)
w = projection_simplex(V, z=1, axis=None)
w2 = _projection_simplex(V.ravel(), z=1)
assert_array_almost_equal(w, w2)
# Axis = 1 case.
W = projection_simplex(V, axis=1)
# Check same as with for loop.
W2 = np.array([_projection_simplex(V[i]) for i in range(V.shape[0])])
assert_array_almost_equal(W, W2)
# Check works with vector z.
W3 = projection_simplex(V, np.ones(V.shape[0]), axis=1)
assert_array_almost_equal(W, W3)
# Axis = 0 case.
W = projection_simplex(V, axis=0)
# Check same as with for loop.
W2 = np.array([_projection_simplex(V[:, i]) for i in range(V.shape[1])]).T
assert_array_almost_equal(W, W2)
# Check works with vector z.
W3 = projection_simplex(V, np.ones(V.shape[1]), axis=0)
assert_array_almost_equal(W, W3)
| 28.473684 | 78 | 0.650647 | ["BSD-2-Clause"] | cptq/smooth-ot | smoothot/tests/test_projection.py | 1,623 | Python |
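For reference (standard material, not stated in the test file): both `projection_simplex` and the old `_projection_simplex` compute the Euclidean projection of a vector $v$ onto the scaled simplex,

$$\min_{w}\ \tfrac{1}{2}\lVert w - v\rVert_2^2 \quad \text{s.t.}\quad \sum_i w_i = z,\ \ w_i \ge 0,$$

whose closed-form solution is $w_i = \max(v_i - \theta, 0)$ with the threshold $\theta$ read off the sorted cumulative sums, exactly as the sort/cumsum code above does (see e.g. Duchi et al., 2008, "Efficient Projections onto the l1-Ball for Learning in High Dimensions").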
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for `tf.data.experimental.parallel_interleave()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.experimental.ops import sleep
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
def _make_fake_dataset_fn():
"""Returns a dataset that emulates a remote storage data source.
Returns a dataset factory which creates a dataset with 100 elements that
emulates the performance characteristic of a file-based dataset stored in a
remote storage. In particular, the first element will take an order of
magnitude longer to produce than the remaining elements (1s vs. 1ms).
"""
def fake_dataset_fn(unused):
del unused
def make_dataset(time_us, num_elements):
return dataset_ops.Dataset.range(num_elements).apply(sleep.sleep(time_us))
return make_dataset(1000 * 1000, 0).concatenate(make_dataset(1000,
100)).take(100)
return fake_dataset_fn
class ParallelInterleaveBenchmark(test.Benchmark):
"""Benchmarks for `tf.data.experimental.parallel_interleave()`."""
def _benchmark(self, dataset_fn, iters, num_elements):
with ops.Graph().as_default():
options = dataset_ops.Options()
options.experimental_optimization.apply_default_optimizations = False
dataset = dataset_fn().with_options(options)
next_element = dataset_ops.make_one_shot_iterator(dataset).get_next()
with session.Session() as sess:
deltas = []
for _ in range(iters):
start = time.time()
for _ in range(num_elements):
sess.run(next_element.op)
end = time.time()
deltas.append(end - start)
mean_wall_time = np.mean(deltas) / num_elements
self.report_benchmark(iters=iters, wall_time=mean_wall_time)
def benchmark_sequential_interleave(self):
def dataset_fn():
return dataset_ops.Dataset.range(1).repeat().interleave(
_make_fake_dataset_fn(), cycle_length=10)
self._benchmark(dataset_fn=dataset_fn, iters=10, num_elements=100)
def benchmark_parallel_interleave_v1(self):
"""Benchmark for parallel interleave that does not support autotuning."""
def dataset_fn():
return dataset_ops.Dataset.range(1).repeat().apply(
interleave_ops.parallel_interleave(
_make_fake_dataset_fn(), cycle_length=10))
self._benchmark(dataset_fn=dataset_fn, iters=100, num_elements=1000)
def benchmark_parallel_interleave_v2(self):
"""Benchmark for parallel interleave that supports autotuning."""
def dataset_fn():
return dataset_ops.Dataset.range(1).repeat().interleave(
_make_fake_dataset_fn(),
cycle_length=10, num_parallel_calls=dataset_ops.AUTOTUNE)
self._benchmark(dataset_fn=dataset_fn, iters=100, num_elements=1000)
if __name__ == "__main__":
test.main()
| 36.780952 | 80 | 0.71854 | ["Apache-2.0"] | 1244783394/tensorflow | tensorflow/python/data/experimental/benchmarks/parallel_interleave_benchmark.py | 3,862 | Python |
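The benchmark above compares a sequential interleave, the contrib-era `parallel_interleave`, and the autotuned `num_parallel_calls` form. As a rough sketch, the same two interleave patterns can be expressed with only public TF 2.x APIs (`tf.data.AUTOTUNE` requires TF >= 2.4); `make_fake_dataset` here is a stand-in for the sleep-based factory in the file and adds no artificial latency.

```python
# Sketch of the benchmarked interleave patterns using public TF 2.x APIs only;
# make_fake_dataset is a stand-in for the sleep-based factory above.
import tensorflow as tf

def make_fake_dataset(_):
    return tf.data.Dataset.range(100)

sequential = tf.data.Dataset.range(1).repeat().interleave(
    make_fake_dataset, cycle_length=10)

autotuned = tf.data.Dataset.range(1).repeat().interleave(
    make_fake_dataset, cycle_length=10,
    num_parallel_calls=tf.data.AUTOTUNE)
```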
from .multilanguage import antlr4, waxeye
from .python import TatSu, arpeggio, parglare, parsimonious
| 34 | 59 | 0.823529 | ["Unlicense"] | KOLANICH/UniGrammarRuntime.py | UniGrammarRuntime/backends/__init__.py | 102 | Python |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyStevedore(PythonPackage):
"""Manage Dynamic Plugins for Python Applications."""
homepage = "https://docs.openstack.org/stevedore/latest/"
pypi = "stevedore/stevedore-1.28.0.tar.gz"
version('1.28.0', sha256='f1c7518e7b160336040fee272174f1f7b29a46febb3632502a8f2055f973d60b')
depends_on('python@2.6:')
depends_on('py-six@1.10.0:', type=('build', 'run'))
depends_on('py-pbr@2.0.0:2.1.0', type=('build', 'run'))
| 31.809524 | 96 | 0.714072 | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 0luhancheng0/spack | var/spack/repos/builtin/packages/py-stevedore/package.py | 668 | Python |
# Tradingview Technical Analysis (tradingview-ta)
# Author: deathlyface (https://github.com/deathlyface)
# Rewritten from https://www.tradingview.com/static/bundles/technicals.f2e6e6a51aebb6cd46f8.js
# License: MIT
class Recommendation:
buy = "BUY"
strong_buy = "STRONG_BUY"
sell = "SELL"
strong_sell = "STRONG_SELL"
neutral = "NEUTRAL"
error = "ERROR"
class Compute:
def MA(ma, close):
"""Compute Moving Average
Args:
ma (float): MA value
close (float): Close value
Returns:
string: "BUY", "SELL", or "NEUTRAL"
"""
if (ma < close):
return Recommendation.buy
elif (ma > close):
return Recommendation.sell
else:
return Recommendation.neutral
def RSI(rsi, rsi1):
"""Compute Relative Strength Index
Args:
rsi (float): RSI value
rsi1 (float): RSI[1] value
Returns:
string: "BUY", "SELL", or "NEUTRAL"
"""
if (rsi < 30 and rsi1 > rsi):
return Recommendation.buy
elif (rsi > 70 and rsi1 < rsi):
return Recommendation.sell
else:
return Recommendation.neutral
def Stoch(k, d, k1, d1):
"""Compute Stochastic
Args:
k (float): Stoch.K value
d (float): Stoch.D value
k1 (float): Stoch.K[1] value
d1 (float): Stoch.D[1] value
Returns:
string: "BUY", "SELL", or "NEUTRAL"
"""
if (k < 20 and d < 20 and k > d and k1 < d1):
return Recommendation.buy
elif (k > 80 and d > 80 and k < d and k1 > d1):
return Recommendation.sell
else:
return Recommendation.neutral
def CCI20(cci20, cci201):
"""Compute Commodity Channel Index 20
Args:
cci20 (float): CCI20 value
cci201 ([type]): CCI20[1] value
Returns:
string: "BUY", "SELL", or "NEUTRAL"
"""
if (cci20 < -100 and cci20 > cci201):
return Recommendation.buy
elif (cci20 > 100 and cci20 < cci201):
return Recommendation.sell
else:
return Recommendation.neutral
def ADX(adx, adxpdi, adxndi, adxpdi1, adxndi1):
"""Compute Average Directional Index
Args:
adx (float): ADX value
adxpdi (float): ADX+DI value
adxndi (float): ADX-DI value
adxpdi1 (float): ADX+DI[1] value
adxndi1 (float): ADX-DI[1] value
Returns:
string: "BUY", "SELL", or "NEUTRAL"
"""
if (adx > 20 and adxpdi1 < adxndi1 and adxpdi > adxndi):
return Recommendation.buy
elif (adx > 20 and adxpdi1 > adxndi1 and adxpdi < adxndi):
return Recommendation.sell
else:
return Recommendation.neutral
def AO(ao, ao1):
"""Compute Awesome Oscillator
Args:
ao (float): AO value
ao1 (float): AO[1] value
Returns:
string: "BUY", "SELL", or "NEUTRAL"
"""
if (ao > 0 and ao1 < 0 or ao > 0 and ao1 > 0 and ao > ao1):
return Recommendation.buy
elif (ao < 0 and ao1 > 0 or ao < 0 and ao1 < 0 and ao < ao1):
return Recommendation.sell
else:
return Recommendation.neutral
def Mom(mom, mom1):
"""Compute Momentum
Args:
mom (float): Mom value
mom1 (float): Mom[1] value
Returns:
string: "BUY", "SELL", or "NEUTRAL"
"""
if (mom < mom1):
return Recommendation.buy
elif (mom > mom1):
return Recommendation.sell
else:
return Recommendation.neutral
def MACD(macd, signal):
"""Compute Moving Average Convergence/Divergence
Args:
macd (float): MACD.macd value
signal (float): MACD.signal value
Returns:
string: "BUY", "SELL", or "NEUTRAL"
"""
if (macd > signal):
return Recommendation.buy
elif (macd < signal):
return Recommendation.sell
else:
return Recommendation.neutral
def BBBuy(close, bblower):
"""Compute Bull Bear Buy
Args:
close (float): close value
bblower (float): BB.lower value
Returns:
string: "BUY", "SELL", or "NEUTRAL"
"""
if (close < bblower):
return Recommendation.buy
else:
return Recommendation.neutral
def BBSell(close, bbupper):
"""Compute Bull Bear Sell
Args:
close (float): close value
bbupper (float): BB.upper value
Returns:
string: "BUY", "SELL", or "NEUTRAL"
"""
if (close > bbupper):
return Recommendation.sell
else:
return Recommendation.neutral
def PSAR(psar, open):
"""Compute Parabolic Stop-And-Reverse
Args:
psar (float): P.SAR value
open (float): open value
Returns:
string: "BUY", "SELL", or "NEUTRAL"
"""
if (psar < open):
return Recommendation.buy
elif (psar > open):
return Recommendation.sell
else:
return Recommendation.neutral
def Recommend(value):
"""Compute Recommend
Args:
value (float): recommend value
Returns:
string: "STRONG_BUY", "BUY", "NEUTRAL", "SELL", "STRONG_SELL", or "ERROR"
"""
if (value >= -1 and value < -.5):
return Recommendation.strong_sell
elif (value >= -.5 and value < 0):
return Recommendation.sell
elif (value == 0):
return Recommendation.neutral
elif (value > 0 and value <= .5):
return Recommendation.buy
elif (value > .5 and value <= 1):
return Recommendation.strong_buy
else:
return Recommendation.error
def Simple(value):
"""Compute Simple
Args:
value (float): Rec.X value
Returns:
string: "BUY", "SELL", or "NEUTRAL"
"""
if (value == -1):
return Recommendation.sell
elif (value == 1):
return Recommendation.buy
else:
return Recommendation.neutral
| 27.1875 | 94 | 0.517241 | ["MIT"] | Chizkiyahu/python-tradingview-ta | tradingview_ta/technicals.py | 6,525 | Python |
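A minimal sketch of how the rules above turn raw indicator values into recommendations. The indicator values are illustrative only, and the import path is an assumption inferred from the file path shown in the row above.

```python
# Illustrative values only; assumes Compute and Recommendation are importable
# from the module above (the path suggests tradingview_ta.technicals).
from tradingview_ta.technicals import Compute, Recommendation

assert Compute.MA(ma=98.0, close=100.0) == Recommendation.buy       # close above its moving average
assert Compute.RSI(rsi=25.0, rsi1=28.0) == Recommendation.buy       # oversold, RSI below 30
assert Compute.MACD(macd=1.2, signal=0.8) == Recommendation.buy     # MACD line above signal line
assert Compute.Recommend(0.7) == Recommendation.strong_buy          # aggregate score in (0.5, 1]
```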
import sys
sys.path.append('./datastructures')
from datastructures import Stack, StackNode
class SetOfStacks:
LIMIT_PER_STACK = 2
def __init__(self):
self.main_stack = Stack()
def pop(self):
if self.is_empty():
return None
elif self._top_stack().is_empty():
self.main_stack.pop()
return self.pop()  # retry with the next stack down
return self._top_stack().pop()
def push(self, item):
if self.is_empty():
self.main_stack.push(Stack())
self._top_stack().push(item)
def is_empty(self):
return self.main_stack.is_empty()
def peek(self):
if self.is_empty():
return None
return self._top_stack().peek().value
def _top_stack(self):
return self.main_stack.peek()
if __name__ == '__main__': # tests
stacks = SetOfStacks()
assert stacks.peek() is None
stacks.push(StackNode(1))
assert stacks.peek() == 1
stacks.push(StackNode(2))
assert stacks.peek() == 2
stacks.push(StackNode(3))
assert stacks.pop().value == 3
assert stacks.pop().value == 2
assert stacks.pop().value == 1
assert stacks.pop() is None  # everything has been popped
| 20.586207 | 45 | 0.600503 | ["MIT"] | italo-batista/competitiveProgramming | cracking-code-interview/chapter_03/3-3_stack_of_plates.py | 1,194 | Python |
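One thing worth noting about the file above: `LIMIT_PER_STACK` is declared but never consulted, so every item lands on the current top stack. A capacity check along these lines would enforce it; `size()` is a hypothetical accessor, since the `Stack` class imported from `datastructures` is not shown here.

```python
# Hypothetical push honoring LIMIT_PER_STACK; Stack.size() is an assumed
# accessor, not shown in the imported datastructures module.
def push(self, item):
    if self.is_empty() or self._top_stack().size() >= SetOfStacks.LIMIT_PER_STACK:
        self.main_stack.push(Stack())
    self._top_stack().push(item)
```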
import os
import copy
import numpy as np
import click
from typing import List, Optional
import torch
import pickle
def extract_conv_names(model):
model_names = list(name for name in model.keys())
return model_names
def blend_models(low, high, model_res, level):
levels = [x for x in range(level)]
low_names = extract_conv_names(low)
high_names = extract_conv_names(high)
assert all((x == y for x, y in zip(low_names, high_names)))
#start with lower model and add weights above
model_out = copy.deepcopy(low)
for name in high.keys():
if any(f'convs.{lvl}' in name for lvl in levels):
continue
if any(f'to_rgbs.{lvl // 2}' in name for lvl in levels):
continue
if any(f'noises.noise_{lvl}' in name for lvl in levels):
continue
if ('style' in name):
continue
if ('conv1' in name):
continue
if ('to_rgb1' in name):
continue
if ('input.input' in name):
continue
# print(name)
model_out[name] = high[name].clone()
return model_out
#----------------------------------------------------------------------------
@click.command()
@click.pass_context
@click.option('--lower_res_pkl', help='Network pickle filename for lower resolutions', required=True)
@click.option('--higher_res_pkl', help='Network pickle filename for higher resolutions', required=True)
@click.option('--output_path','out', help='Network pickle filepath for output', default='./blended.pt')
@click.option('--model_res', type=int, help='Output resolution of model (likely 1024, 512, or 256)', default=64, show_default=True)
@click.option('--split_lvl', type=int, help='Resolution to split model weights', default=4, show_default=True)
def create_blended_model(
ctx: click.Context,
lower_res_pkl: str,
higher_res_pkl: str,
model_res: Optional[int],
split_lvl: Optional[int],
out: Optional[str],
):
lo_G_ema = torch.load(lower_res_pkl, map_location=torch.device('cpu'))['g_ema']
hi = torch.load(higher_res_pkl, map_location=torch.device('cpu'))['g_ema']
model_out = blend_models(lo_G_ema, hi, model_res, split_lvl)
torch.save(model_out, out)
#----------------------------------------------------------------------------
if __name__ == "__main__":
create_blended_model() # pylint: disable=no-value-for-parameter
#----------------------------------------------------------------------------
| 32.701299 | 131 | 0.603654 | ["MIT", "BSD-2-Clause", "Apache-2.0"] | jscarlson/stylegan2-pytorch | blend.py | 2,518 | Python |
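For completeness, the CLI above boils down to a direct call like the following; the checkpoint file names are illustrative placeholders, and both checkpoints must contain a `g_ema` state dict from compatible generators.

```python
# Direct use of blend_models, mirroring create_blended_model above;
# the checkpoint file names are illustrative placeholders.
import torch

low = torch.load("lower_res.pt", map_location=torch.device("cpu"))["g_ema"]
high = torch.load("higher_res.pt", map_location=torch.device("cpu"))["g_ema"]
blended = blend_models(low, high, model_res=256, level=4)
torch.save(blended, "blended.pt")
```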
# Copyright 2019 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trains an N-styles style transfer model on the cheap.
Training is done by finetuning the instance norm parameters of a pre-trained
N-styles style transfer model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import os
from magenta.models.image_stylization import image_utils
from magenta.models.image_stylization import learning
from magenta.models.image_stylization import model
from magenta.models.image_stylization import vgg
import tensorflow as tf
from tensorflow.contrib import slim as contrib_slim
slim = contrib_slim
DEFAULT_CONTENT_WEIGHTS = '{"vgg_16/conv3": 1.0}'
DEFAULT_STYLE_WEIGHTS = ('{"vgg_16/conv1": 1e-4, "vgg_16/conv2": 1e-4,'
' "vgg_16/conv3": 1e-4, "vgg_16/conv4": 1e-4}')
flags = tf.app.flags
flags.DEFINE_float('clip_gradient_norm', 0, 'Clip gradients to this norm')
flags.DEFINE_float('learning_rate', 1e-3, 'Learning rate')
flags.DEFINE_integer('batch_size', 16, 'Batch size.')
flags.DEFINE_integer('image_size', 256, 'Image size.')
flags.DEFINE_integer('num_styles', None, 'Number of styles.')
flags.DEFINE_float('alpha', 1.0, 'Width multiplier')
flags.DEFINE_integer('ps_tasks', 0,
'Number of parameter servers. If 0, parameters '
'are handled locally by the worker.')
flags.DEFINE_integer('save_summaries_secs', 15,
'Frequency at which summaries are saved, in seconds.')
flags.DEFINE_integer('save_interval_secs', 15,
'Frequency at which the model is saved, in seconds.')
flags.DEFINE_integer('task', 0,
'Task ID. Used when training with multiple '
'workers to identify each worker.')
flags.DEFINE_integer('train_steps', 40000, 'Number of training steps.')
flags.DEFINE_string('checkpoint', None,
'Checkpoint file for the pretrained model.')
flags.DEFINE_string('content_weights', DEFAULT_CONTENT_WEIGHTS,
'Content weights')
flags.DEFINE_string('master', '',
'Name of the TensorFlow master to use.')
flags.DEFINE_string('style_coefficients', None,
'Scales the style weights conditioned on the style image.')
flags.DEFINE_string('style_dataset_file', None, 'Style dataset file.')
flags.DEFINE_string('style_weights', DEFAULT_STYLE_WEIGHTS, 'Style weights')
flags.DEFINE_string('train_dir', None,
'Directory for checkpoints and summaries.')
FLAGS = flags.FLAGS
def main(unused_argv=None):
with tf.Graph().as_default():
# Force all input processing onto CPU in order to reserve the GPU for the
# forward inference and back-propagation.
device = '/cpu:0' if not FLAGS.ps_tasks else '/job:worker/cpu:0'
with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks,
worker_device=device)):
inputs, _ = image_utils.imagenet_inputs(FLAGS.batch_size,
FLAGS.image_size)
# Load style images and select one at random (for each graph execution, a
# new random selection occurs)
_, style_labels, style_gram_matrices = image_utils.style_image_inputs(
os.path.expanduser(FLAGS.style_dataset_file),
batch_size=FLAGS.batch_size, image_size=FLAGS.image_size,
square_crop=True, shuffle=True)
with tf.device(tf.train.replica_device_setter(FLAGS.ps_tasks)):
# Process style and weight flags
num_styles = FLAGS.num_styles
if FLAGS.style_coefficients is None:
style_coefficients = [1.0 for _ in range(num_styles)]
else:
style_coefficients = ast.literal_eval(FLAGS.style_coefficients)
if len(style_coefficients) != num_styles:
raise ValueError(
'number of style coefficients differs from number of styles')
content_weights = ast.literal_eval(FLAGS.content_weights)
style_weights = ast.literal_eval(FLAGS.style_weights)
# Rescale style weights dynamically based on the current style image
style_coefficient = tf.gather(
tf.constant(style_coefficients), style_labels)
style_weights = dict((key, style_coefficient * value)
for key, value in style_weights.items())
# Define the model
stylized_inputs = model.transform(
inputs,
alpha=FLAGS.alpha,
normalizer_params={
'labels': style_labels,
'num_categories': num_styles,
'center': True,
'scale': True
})
# Compute losses.
total_loss, loss_dict = learning.total_loss(
inputs, stylized_inputs, style_gram_matrices, content_weights,
style_weights)
for key, value in loss_dict.items():
tf.summary.scalar(key, value)
instance_norm_vars = [var for var in slim.get_variables('transformer')
if 'InstanceNorm' in var.name]
other_vars = [var for var in slim.get_variables('transformer')
if 'InstanceNorm' not in var.name]
# Function to restore VGG16 parameters.
init_fn_vgg = slim.assign_from_checkpoint_fn(vgg.checkpoint_file(),
slim.get_variables('vgg_16'))
# Function to restore N-styles parameters.
init_fn_n_styles = slim.assign_from_checkpoint_fn(
os.path.expanduser(FLAGS.checkpoint), other_vars)
def init_fn(session):
init_fn_vgg(session)
init_fn_n_styles(session)
# Set up training.
optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
train_op = slim.learning.create_train_op(
total_loss, optimizer, clip_gradient_norm=FLAGS.clip_gradient_norm,
variables_to_train=instance_norm_vars, summarize_gradients=False)
# Run training.
slim.learning.train(
train_op=train_op,
logdir=os.path.expanduser(FLAGS.train_dir),
master=FLAGS.master,
is_chief=FLAGS.task == 0,
number_of_steps=FLAGS.train_steps,
init_fn=init_fn,
save_summaries_secs=FLAGS.save_summaries_secs,
save_interval_secs=FLAGS.save_interval_secs)
def console_entry_point():
tf.app.run(main)
if __name__ == '__main__':
console_entry_point()
| 41.35119 | 80 | 0.678854 | ["Apache-2.0"] | Surya130499/magenta | magenta/models/image_stylization/image_stylization_finetune.py | 6,947 | Python |
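For context on what `learning.total_loss` combines in the file above: the `--content_weights` and `--style_weights` flags parameterize the usual Gatys-style objective, roughly

$$\mathcal{L} \;=\; \sum_{l} w^{c}_{l}\,\big\lVert F_l(x) - F_l(T(x))\big\rVert_2^2 \;+\; \sum_{l} w^{s}_{l}\,\big\lVert G_l(T(x)) - G_l(s)\big\rVert_F^2,$$

where $T$ is the stylization network, $F_l$ are VGG-16 activations at layer $l$, and $G_l$ are the corresponding Gram matrices of the style image $s$. The exact normalization constants live in `learning.py` and are not shown here.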
import cv2
import numpy as np
# Gray scale
def BGR2GRAY(img):
b = img[:, :, 0].copy()
g = img[:, :, 1].copy()
r = img[:, :, 2].copy()
# Gray scale
out = 0.2126 * r + 0.7152 * g + 0.0722 * b
out = out.astype(np.uint8)
return out
# LoG filter
def LoG_filter(img, K_size=5, sigma=3):
H, W = img.shape  # the input here is the single-channel grayscale image
# zero padding
pad = K_size // 2
out = np.zeros((H + pad * 2, W + pad * 2), dtype=float)
out[pad: pad + H, pad: pad + W] = img.copy().astype(float)
tmp = out.copy()
# LoG Kernel
K = np.zeros((K_size, K_size), dtype=float)
for x in range(-pad, -pad + K_size):
for y in range(-pad, -pad + K_size):
K[y + pad, x + pad] = (x ** 2 + y ** 2 - sigma ** 2) * np.exp( -(x ** 2 + y ** 2) / (2 * (sigma ** 2)))
K /= (2 * np.pi * (sigma ** 6))
K /= K.sum()
print(K)
# filtering
for y in range(H):
for x in range(W):
out[pad + y, pad + x] = np.sum(K * tmp[y: y + K_size, x: x + K_size])
out = np.clip(out, 0, 255)
out = out[pad: pad + H, pad: pad + W].astype(np.uint8)
return out
# Read image
img = cv2.imread("imori_noise.jpg")
# grayscale
gray = BGR2GRAY(img)
# LoG filtering
out = LoG_filter(gray, K_size=5, sigma=3)
# Save result
cv2.imwrite("out.jpg", out)
cv2.imshow("result", out)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 23.096774 | 119 | 0.511872 | ["MIT"] | OverHall27/Gasyori100knock | Question_11_20/answers/answer_19.py | 1,432 | Python |
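For reference, the continuous Laplacian-of-Gaussian that the kernel loop above discretizes is usually written as

$$\mathrm{LoG}(x, y) \;=\; \frac{x^2 + y^2 - 2\sigma^2}{2\pi\sigma^6}\,\exp\!\left(-\frac{x^2 + y^2}{2\sigma^2}\right).$$

The code builds a small-window variant of this (it uses $x^2 + y^2 - \sigma^2$ in the polynomial factor) and then normalizes the kernel to unit sum before filtering.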
import logging
import os
import sys
import warnings
from collections import namedtuple
from typing import *
import matplotlib.image
import matplotlib.pyplot as plt
from torch import Tensor
from torch.utils.tensorboard import SummaryWriter
from booster import Diagnostic
from .datatracker import DataTracker
BestScore = namedtuple('BestScore', ['step', 'epoch', 'value', 'summary'])
class BaseLogger():
def __init__(self, key, logdir):
self.key = key
self.logdir = logdir
def log_diagnostic(self, global_step: int, epoch: int, summary: Diagnostic, **kwargs):
raise NotImplementedError
def log_image(self, key: str, global_step: int, epoch: int, img_tensor: Tensor):
raise NotImplementedError
class TensorboardLogger(BaseLogger):
def __init__(self, *args, **kwargs):
super().__init__(*args)
self.writer = SummaryWriter(os.path.join(self.logdir, self.key))
def log_diagnostic(self, global_step: int, epoch: int, summary: Diagnostic, **kwargs):
summary.log(self.writer, global_step)
def log_image(self, key: str, global_step: int, epoch: int, img_tensor: Tensor):
self.writer.add_image(key, img_tensor, global_step=global_step)
class LoggingLogger(BaseLogger):
def __init__(self, *args, diagnostic_keys=['loss'], **kwargs):
super().__init__(*args)
self.logger = logging.getLogger(self.key)
# logFormatter = logging.Formatter('%(asctime)s %(name)-4s %(levelname)-4s %(message)s')
#
# fileHandler = logging.FileHandler(os.path.join(self.logdir, 'run.log'))
# fileHandler.setFormatter(logFormatter)
# self.logger.addHandler(fileHandler)
#
# consoleHandler = logging.StreamHandler(sys.stdout)
# consoleHandler.setFormatter(logFormatter)
# self.logger.addHandler(consoleHandler)
self.logger.setLevel(logging.INFO)
self.diagnostic_keys = diagnostic_keys
def log_diagnostic(self, global_step: int, epoch: int, summary: Diagnostic, best_score: Optional[BestScore] = None,
**kwargs):
for stats_key in self.diagnostic_keys:
if not stats_key in summary.keys():
self.logger.warning('key ' + str(stats_key) + ' not in summary.')
else:
message = f'[{global_step} / {epoch}] '
message += ''.join([f'{k} {v:6.2f} ' for k, v in summary.get(stats_key).items()])
if "info" in summary.keys() and "elapsed-time" in summary["info"].keys():
message += f'({summary["info"]["elapsed-time"]:.2f}s /iter)'
else:
warnings.warn(
f"Summary does not contain the key info/elapsed-time. The elapsed time won't be displayed.")
if best_score is not None:
message += f' (best: {best_score.value:6.2f} [{best_score.step} | {best_score.epoch}])'
self.logger.info(message)
def log_image(self, key: str, global_step: int, epoch: int, img_tensor: Tensor):
pass
class PlotLogger(BaseLogger):
def __init__(self, *args, diagnostic_keys=['loss'], **kwargs):
super().__init__(*args)
self.diagnostic_keys = diagnostic_keys
self.tracker = DataTracker(label=self.key)
def log_diagnostic(self, global_step: int, epoch: int, summary: Diagnostic, **kwargs):
for key in self.diagnostic_keys:
self.tracker.append(global_step, summary[key])
def plot(self, *args, **kwargs):
self.tracker.plot(*args, **kwargs)
def log_image(self, key: str, global_step: int, epoch: int, img_tensor: Tensor):
img = img_tensor.data.permute(1, 2, 0).cpu().numpy()
matplotlib.image.imsave(os.path.join(self.logdir, f"{key}.png"), img)
class PlotHandler(List):
def __init__(self, logdir, *args, **kwargs):
super().__init__(*args, **kwargs)
self.path = os.path.join(logdir, "curves.png")
def plot(self):
if len(self):
logger = self[0]
keys = logger.tracker.data.keys()
plt.figure(figsize=(4 * len(keys), 3))
for i, key in enumerate(keys):
plt.subplot(1, len(keys), i + 1)
plt.title(key)
for logger in self:
logger.plot(key)
plt.legend()
plt.savefig(self.path)
class Logger(BaseLogger):
def __init__(self, key, logdir, tensorboard=True, logging=True, plot=True, **kwargs):
super().__init__(key, logdir)
self.loggers = []
if tensorboard:
self.loggers += [TensorboardLogger(key, logdir, **kwargs)]
if logging:
self.loggers += [LoggingLogger(key, logdir, **kwargs)]
if plot:
self.loggers += [PlotLogger(key, logdir, **kwargs)]
def log_diagnostic(self, *args, **kwargs):
for logger in self.loggers:
logger.log_diagnostic(*args, **kwargs)
def log_image(self, *args, **kwargs):
for logger in self.loggers:
logger.log_image(*args, **kwargs)
class LoggerManager():
def __init__(self, logdir, **kwargs):
self.logdir = logdir
self.kwargs = kwargs
self.loggers = {}
self.plot_handler = PlotHandler(self.logdir)
def init_logger(self, key):
self.loggers[key] = Logger(key, self.logdir, **self.kwargs)
# mappend PlotLogger to PlotHandler
for logger in self.loggers[key].loggers:
if isinstance(logger, PlotLogger):
self.plot_handler.append(logger)
def log_diagnostic(self, key, step, epoch, summary, **kwargs):
if key not in self.loggers:
self.init_logger(key)
self.loggers[key].log_diagnostic(step, epoch, summary, **kwargs)
self.plot_handler.plot()
def log_image(self, key, image_key, step, epoch, img_tensor, **kwargs):
if key not in self.loggers:
self.init_logger(key)
self.loggers[key].log_image(image_key, step, epoch, img_tensor, **kwargs)
| 33.801105 | 119 | 0.61932 | ["MIT"] | vlievin/booster-pytorch | booster/logging/logger.py | 6,118 | Python |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ..fluid.layer_helper import LayerHelper
from ..framework import _varbase_creator, _dygraph_tracer
from ..fluid.data_feeder import check_variable_and_dtype, check_type, check_dtype
from ..static import Variable
from ..fluid.framework import _in_legacy_dygraph, in_dygraph_mode
from ..fluid.layers import transpose, cast # noqa: F401
from ..fluid import layers
import paddle
from paddle.common_ops_import import core
from paddle.common_ops_import import VarDesc
from paddle import _C_ops
__all__ = []
def matmul(x, y, transpose_x=False, transpose_y=False, name=None):
"""
Applies matrix multiplication to two tensors. `matmul` follows
the complete broadcast rules,
and its behavior is consistent with `np.matmul`.
Currently, the input tensors can have any number of dimensions, and `matmul` can be used to
compute dot products, matrix-matrix products and batched matrix products.
The actual behavior depends on the shapes of :math:`x`, :math:`y` and the
flag values of :attr:`transpose_x`, :attr:`transpose_y`. Specifically:
- If a transpose flag is specified, the last two dimensions of the tensor
are transposed. If the tensor is 1-dimensional, the transpose is invalid. If the tensor
has the 1-dimensional shape :math:`[D]`, then for :math:`x` it is treated as :math:`[1, D]`, whereas
for :math:`y` it is the opposite: It is treated as :math:`[D, 1]`.
The multiplication behavior depends on the dimensions of `x` and `y`. Specifically:
- If both tensors are 1-dimensional, the dot product result is obtained.
- If both tensors are 2-dimensional, the matrix-matrix product is obtained.
- If the `x` is 1-dimensional and the `y` is 2-dimensional,
a `1` is prepended to its dimension in order to conduct the matrix multiply.
After the matrix multiply, the prepended dimension is removed.
- If the `x` is 2-dimensional and `y` is 1-dimensional,
the matrix-vector product is obtained.
- If both arguments are at least 1-dimensional and at least one argument
is N-dimensional (where N > 2), then a batched matrix multiply is obtained.
If the first argument is 1-dimensional, a 1 is prepended to its dimension
in order to conduct the batched matrix multiply and removed after.
If the second argument is 1-dimensional, a 1 is appended to its
dimension for the purpose of the batched matrix multiply and removed after.
The non-matrix (exclude the last two dimensions) dimensions are
broadcasted according the broadcast rule.
For example, if input is a (j, 1, n, m) tensor and the other is a (k, m, p) tensor,
out will be a (j, k, n, p) tensor.
Args:
x (Tensor): The input tensor which is a Tensor.
y (Tensor): The input tensor which is a Tensor.
transpose_x (bool): Whether to transpose :math:`x` before multiplication.
transpose_y (bool): Whether to transpose :math:`y` before multiplication.
name(str|None): A name for this layer(optional). If set None, the layer
will be named automatically.
Returns:
Tensor: The output Tensor.
Examples:
.. code-block:: python
import paddle
import numpy as np
# vector * vector
x_data = np.random.random([10]).astype(np.float32)
y_data = np.random.random([10]).astype(np.float32)
x = paddle.to_tensor(x_data)
y = paddle.to_tensor(y_data)
z = paddle.matmul(x, y)
print(z.numpy().shape)
# [1]
# matrix * vector
x_data = np.random.random([10, 5]).astype(np.float32)
y_data = np.random.random([5]).astype(np.float32)
x = paddle.to_tensor(x_data)
y = paddle.to_tensor(y_data)
z = paddle.matmul(x, y)
print(z.numpy().shape)
# [10]
# batched matrix * broadcasted vector
x_data = np.random.random([10, 5, 2]).astype(np.float32)
y_data = np.random.random([2]).astype(np.float32)
x = paddle.to_tensor(x_data)
y = paddle.to_tensor(y_data)
z = paddle.matmul(x, y)
print(z.numpy().shape)
# [10, 5]
# batched matrix * batched matrix
x_data = np.random.random([10, 5, 2]).astype(np.float32)
y_data = np.random.random([10, 2, 5]).astype(np.float32)
x = paddle.to_tensor(x_data)
y = paddle.to_tensor(y_data)
z = paddle.matmul(x, y)
print(z.numpy().shape)
# [10, 5, 5]
# batched matrix * broadcasted matrix
x_data = np.random.random([10, 1, 5, 2]).astype(np.float32)
y_data = np.random.random([1, 3, 2, 5]).astype(np.float32)
x = paddle.to_tensor(x_data)
y = paddle.to_tensor(y_data)
z = paddle.matmul(x, y)
print(z.numpy().shape)
# [10, 3, 5, 5]
"""
if in_dygraph_mode():
return _C_ops.final_state_matmul(x, y, transpose_x, transpose_y)
if _in_legacy_dygraph():
op_type = 'matmul_v2'
op = getattr(_C_ops, op_type)
return op(x, y, 'trans_x', transpose_x, 'trans_y', transpose_y)
attrs = {
'trans_x': transpose_x,
'trans_y': transpose_y,
}
def __check_input(x, y):
var_names = {'x': x, 'y': y}
for name, val in var_names.items():
check_variable_and_dtype(
val, name,
['float16', 'float32', 'float64', 'complex64', 'complex128'],
'matmul')
__check_input(x, y)
helper = LayerHelper('matmul_v2', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='matmul_v2',
inputs={'X': x,
'Y': y},
outputs={'Out': out},
attrs=attrs)
return out
def norm(x, p='fro', axis=None, keepdim=False, name=None):
"""
Returns the matrix norm (Frobenius) or vector norm (the 1-norm, the Euclidean
or 2-norm, and in general the p-norm for p > 0) of a given tensor.
.. note::
This norm API is different from `numpy.linalg.norm`.
This API supports high-order input tensors (rank >= 3); in that case, the axis along which to compute the norm needs to be specified.
But `numpy.linalg.norm` only supports 1-D vector or 2-D matrix as input tensor.
For p-order matrix norm, this api actually treats matrix as a flattened vector to calculate the vector norm, NOT REAL MATRIX NORM.
Args:
x (Tensor): The input tensor could be N-D tensor, and the input data
type could be float32 or float64.
p (float|string, optional): Order of the norm. Supported values are `fro`, `0`, `1`, `2`,
`inf`, `-inf` and any positive real number yielding the corresponding p-norm. Not supported: ord < 0 and nuclear norm.
Default value is `fro`.
axis (int|list|tuple, optional): The axis on which to apply norm operation. If axis is int
or list(int)/tuple(int) with only one element, the vector norm is computed over the axis.
If `axis < 0`, the dimension to norm operation is rank(input) + axis.
If axis is a list(int)/tuple(int) with two elements, the matrix norm is computed over the axis.
Default value is `None`.
keepdim (bool, optional): Whether to reserve the reduced dimension in the
output Tensor. The result tensor will have fewer dimension
than the :attr:`input` unless :attr:`keepdim` is true, default
value is False.
name (str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: results of norm operation on the specified axis of input tensor,
it's data type is the same as input's Tensor.
Examples:
.. code-block:: python
import paddle
import numpy as np
shape=[2, 3, 4]
np_input = np.arange(24).astype('float32') - 12
np_input = np_input.reshape(shape)
x = paddle.to_tensor(np_input)
#[[[-12. -11. -10. -9.] [ -8. -7. -6. -5.] [ -4. -3. -2. -1.]]
# [[ 0. 1. 2. 3.] [ 4. 5. 6. 7.] [ 8. 9. 10. 11.]]]
# compute frobenius norm along last two dimensions.
out_fro = paddle.linalg.norm(x, p='fro', axis=[0,1])
# out_fro.numpy() [17.435596 16.911535 16.7332 16.911535]
# compute 2-order vector norm along last dimension.
out_pnorm = paddle.linalg.norm(x, p=2, axis=-1)
#out_pnorm.numpy(): [[21.118711 13.190906 5.477226]
# [ 3.7416575 11.224972 19.131126]]
# compute 2-order norm along [0,1] dimension.
out_pnorm = paddle.linalg.norm(x, p=2, axis=[0,1])
#out_pnorm.numpy(): [17.435596 16.911535 16.7332 16.911535]
# compute inf-order norm
out_pnorm = paddle.linalg.norm(x, p=np.inf)
#out_pnorm.numpy() = [12.]
out_pnorm = paddle.linalg.norm(x, p=np.inf, axis=0)
#out_pnorm.numpy(): [[12. 11. 10. 9.] [8. 7. 6. 7.] [8. 9. 10. 11.]]
# compute -inf-order norm
out_pnorm = paddle.linalg.norm(x, p=-np.inf)
#out_pnorm.numpy(): [0.]
out_pnorm = paddle.linalg.norm(x, p=-np.inf, axis=0)
#out_pnorm.numpy(): [[0. 1. 2. 3.] [4. 5. 6. 5.] [4. 3. 2. 1.]]
"""
def frobenius_norm(input, dim=None, keepdim=False, name=None):
"""
The frobenius norm OP is to calculate the frobenius norm of certain two dimensions of Tensor `input`.
Args:
input (Variable): Tensor, data type float32, float64.
dim (list, optional): None for last two dimensions.
keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False.
"""
if dim is not None and not (isinstance(dim, list) and len(dim) == 2):
raise ValueError(
"The dim of frobenius norm op should be None or two elements list!"
)
if paddle.in_dynamic_mode():
if dim is None:
return _C_ops.frobenius_norm(input, 'keep_dim', keepdim,
'reduce_all', True)
return _C_ops.frobenius_norm(input, 'dim', dim, 'keep_dim', keepdim,
'reduce_all', False)
attrs = {'dim': dim, 'keep_dim': keepdim, 'reduce_all': False}
if dim is None:
attrs['reduce_all'] = True
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'frobenius_norm')
helper = LayerHelper('frobenius_norm', **locals())
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
helper.append_op(
type='frobenius_norm',
inputs={'X': input},
outputs={'Out': out},
attrs=attrs)
return out
def vector_norm(input,
porder=None,
axis=None,
keepdim=False,
asvector=False,
name=None):
"""
Calculate the p-order vector norm for certain dimension of Tensor `input`.
Args:
input (Variable): Tensor, data type float32, float64.
porder (float, optional): None for porder=2.0.
axis (int, optional): None for last dimension.
keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False.
"""
if paddle.in_dynamic_mode():
if axis is None: axis = -1
return _C_ops.p_norm(input, 'porder', porder, 'axis', axis,
'keepdim', keepdim, 'asvector', asvector)
if porder is not None:
check_type(porder, 'porder', (float, int), 'p_norm')
if axis is not None:
check_type(axis, 'axis', (int), 'p_norm')
check_variable_and_dtype(input, 'input', ['float32', 'float64'],
'p_norm')
attrs = {
'axis': axis if axis is not None else -1,
'porder': float(porder) if porder is not None else 2.0,
'keepdim': keepdim,
'asvector': asvector,
'epsilon': 1e-12,
}
helper = LayerHelper('p_norm', **locals())
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
helper.append_op(
type='p_norm',
inputs={'X': input},
outputs={'Out': out},
attrs=attrs)
return out
def inf_norm(input,
porder=None,
axis=axis,
keepdim=False,
asvector=False,
name=None):
helper = LayerHelper('frobenius_norm', **locals())
out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
helper.append_op(type='abs', inputs={'X': input}, outputs={'Out': out})
reduce_out = helper.create_variable_for_type_inference(
dtype=helper.input_dtype())
reduce_all = True if axis == None or axis == [] or asvector == True else False
axis = axis if axis != None and axis != [] else [0]
        reduce_type = 'reduce_max' if porder == np.inf else 'reduce_min'
helper.append_op(
type=reduce_type,
inputs={'X': out},
outputs={'Out': reduce_out},
attrs={'dim': axis,
'keep_dim': keepdim,
'reduce_all': reduce_all})
return reduce_out
def p_matrix_norm(input, porder=1., axis=axis, keepdim=False, name=None):
"""
NOTE:
This function actually treats the matrix as flattened vector to calculate vector norm instead of matrix norm.
"""
block = LayerHelper('norm', **locals())
out = block.create_variable_for_type_inference(
dtype=block.input_dtype())
abs_out = block.create_variable_for_type_inference(
dtype=block.input_dtype())
block.append_op(
type='abs', inputs={'X': input}, outputs={'Out': abs_out})
pow_out = block.create_variable_for_type_inference(
dtype=block.input_dtype())
block.append_op(
type='pow',
inputs={'X': abs_out},
outputs={'Out': pow_out},
attrs={'factor': porder})
sum_out = block.create_variable_for_type_inference(
dtype=block.input_dtype())
block.append_op(
type='reduce_sum',
inputs={'X': pow_out},
outputs={'Out': sum_out},
attrs={
'dim': axis,
'keep_dim': keepdim,
'reduce_all': True if axis is None else False
})
porder
block.append_op(
type='pow',
inputs={'X': sum_out},
outputs={'Out': out},
attrs={'factor': float(1. / porder)})
return out
if axis is None and p is not None:
if isinstance(p, str):
if p == "fro":
return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name)
else:
raise ValueError(
"only valid string values are 'fro', found {}".format(p))
elif isinstance(p, (int, float)):
return vector_norm(
x,
porder=p,
axis=axis,
keepdim=keepdim,
asvector=True,
name=name)
else:
raise ValueError("only valid p type is string or float, found {}".
format(type(p)))
if isinstance(axis, tuple):
axis = list(axis)
if isinstance(axis, list) and len(axis) == 1:
axis = axis[0]
#calculate vector norm, where axis is int or list with only one integer
if isinstance(axis, int):
if isinstance(p, str):
if p == "fro":
return vector_norm(
x,
porder=2,
axis=axis,
keepdim=keepdim,
asvector=False,
name=name)
else:
raise ValueError(
"only valid string values are 'fro', found {}".format(p))
elif isinstance(p, (int, float)):
return vector_norm(
x,
axis=axis,
porder=p,
keepdim=keepdim,
asvector=False,
name=name)
else:
raise ValueError(
"unspport p for p-order vector norm. except float, found {}".
format(p))
#calculate matrix norm, where axis is list with two integers
elif isinstance(axis, list) and len(axis) == 2:
if p == "fro":
return frobenius_norm(x, dim=axis, keepdim=keepdim, name=name)
elif p == np.inf or p == -np.inf:
return inf_norm(x, porder=p, axis=axis, keepdim=keepdim, name=name)
elif p == 0:
raise ValueError(
"just suport axis type int or list (length of list <=1) if p = 0, found {}".
format(axis))
else:
return p_matrix_norm(
x, porder=p, axis=axis, keepdim=keepdim, name=name)
else:
raise ValueError(
"except axis type int or list (length of list <=2), found {}".
format(axis))
def dist(x, y, p=2, name=None):
r"""
This OP returns the p-norm of (x - y). It is not a norm in a strict sense, only a measure
of distance. The shapes of x and y must be broadcastable. The definition is as follows, for
details, please refer to the `numpy's broadcasting <https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html>`_:
- Each input has at least one dimension.
- Match the two input dimensions from back to front, the dimension sizes must either be equal, one of them is 1, or one of them does not exist.
Where, z = x - y, the shapes of x and y are broadcastable, then the shape of z can be
obtained as follows:
1. If the number of dimensions of x and y are not equal, prepend 1 to the dimensions of the
tensor with fewer dimensions.
For example, The shape of x is [8, 1, 6, 1], the shape of y is [7, 1, 5], prepend 1 to the
dimension of y.
x (4-D Tensor): 8 x 1 x 6 x 1
y (4-D Tensor): 1 x 7 x 1 x 5
2. Determine the size of each dimension of the output z: choose the maximum value from the
two input dimensions.
z (4-D Tensor): 8 x 7 x 6 x 5
If the number of dimensions of the two inputs are the same, the size of the output can be
directly determined in step 2. When p takes different values, the norm formula is as follows:
When p = 0, defining $0^0=0$, the zero-norm of z is simply the number of non-zero elements of z.
.. math::
||z||_{0}=\lim_{p \\rightarrow 0}\sum_{i=1}^{m}|z_i|^{p}
When p = inf, the inf-norm of z is the maximum element of z.
.. math::
||z||_\infty=\max_i |z_i|
When p = -inf, the negative-inf-norm of z is the minimum element of z.
.. math::
||z||_{-\infty}=\min_i |z_i|
Otherwise, the p-norm of z follows the formula,
.. math::
||z||_{p}=(\sum_{i=1}^{m}|z_i|^p)^{\\frac{1}{p}}
Args:
x (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64.
y (Tensor): 1-D to 6-D Tensor, its data type is float32 or float64.
p (float, optional): The norm to be computed, its data type is float32 or float64. Default: 2.
Returns:
Tensor: Tensor that is the p-norm of (x - y).
Examples:
.. code-block:: python
import paddle
import numpy as np
x = paddle.to_tensor(np.array([[3, 3],[3, 3]]), "float32")
y = paddle.to_tensor(np.array([[3, 3],[3, 1]]), "float32")
out = paddle.dist(x, y, 0)
print(out) # out = [1.]
out = paddle.dist(x, y, 2)
print(out) # out = [2.]
out = paddle.dist(x, y, float("inf"))
print(out) # out = [2.]
out = paddle.dist(x, y, float("-inf"))
print(out) # out = [0.]
"""
check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'dist')
check_variable_and_dtype(y, 'dtype', ['float32', 'float64'], 'dist')
check_type(p, 'p', (float, int), 'dist')
helper = LayerHelper("dist", **locals())
out = helper.create_variable_for_type_inference(x.dtype)
inputs = {"X": [x], "Y": [y]}
outputs = {'Out': [out]}
attrs = {"p": float(p)}
helper.append_op(
type='dist', inputs=inputs, outputs={'Out': out}, attrs=attrs)
return out
def cond(x, p=None, name=None):
"""
Computes the condition number of a matrix or batches of matrices with respect to a matrix norm ``p``.
Args:
x (Tensor): The input tensor could be tensor of shape ``(*, m, n)`` where ``*`` is zero or more batch dimensions
for ``p`` in ``(2, -2)``, or of shape ``(*, n, n)`` where every matrix is invertible for any supported ``p``.
And the input data type could be ``float32`` or ``float64``.
p (float|string, optional): Order of the norm. Supported values are `fro`, `nuc`, `1`, `-1`, `2`, `-2`,
`inf`, `-inf`. Default value is `None`, meaning that the order of the norm is `2`.
name (str, optional): The default value is `None`. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: computing results of condition number, its data type is the same as input Tensor ``x``.
Examples:
.. code-block:: python
import paddle
import numpy as np
x = paddle.to_tensor([[1., 0, -1], [0, 1, 0], [1, 0, 1]])
# compute conditional number when p is None
out = paddle.linalg.cond(x)
# out.numpy() [1.4142135]
# compute conditional number when order of the norm is 'fro'
out_fro = paddle.linalg.cond(x, p='fro')
# out_fro.numpy() [3.1622777]
# compute conditional number when order of the norm is 'nuc'
out_nuc = paddle.linalg.cond(x, p='nuc')
# out_nuc.numpy() [9.2426405]
# compute conditional number when order of the norm is 1
out_1 = paddle.linalg.cond(x, p=1)
# out_1.numpy() [2.]
# compute conditional number when order of the norm is -1
out_minus_1 = paddle.linalg.cond(x, p=-1)
# out_minus_1.numpy() [1.]
# compute conditional number when order of the norm is 2
out_2 = paddle.linalg.cond(x, p=2)
# out_2.numpy() [1.4142135]
# compute conditional number when order of the norm is -1
out_minus_2 = paddle.linalg.cond(x, p=-2)
# out_minus_2.numpy() [0.70710677]
# compute conditional number when order of the norm is inf
out_inf = paddle.linalg.cond(x, p=np.inf)
# out_inf.numpy() [2.]
# compute conditional number when order of the norm is -inf
out_minus_inf = paddle.linalg.cond(x, p=-np.inf)
# out_minus_inf.numpy() [1.]
a = paddle.to_tensor(np.random.randn(2, 4, 4).astype('float32'))
# a.numpy()
# [[[ 0.14063153 -0.996288 0.7996131 -0.02571543]
# [-0.16303636 1.5534962 -0.49919784 -0.04402903]
# [-1.1341571 -0.6022629 0.5445269 0.29154757]
# [-0.16816919 -0.30972657 1.7521842 -0.5402487 ]]
# [[-0.58081484 0.12402827 0.7229862 -0.55046535]
# [-0.15178485 -1.1604939 0.75810957 0.30971205]
# [-0.9669573 1.0940945 -0.27363303 -0.35416734]
# [-1.216529 2.0018666 -0.7773689 -0.17556527]]]
a_cond_fro = paddle.linalg.cond(a, p='fro')
# a_cond_fro.numpy() [31.572273 28.120834]
b = paddle.to_tensor(np.random.randn(2, 3, 4).astype('float64'))
# b.numpy()
# [[[ 1.61707487 0.46829144 0.38130416 0.82546736]
# [-1.72710298 0.08866375 -0.62518804 0.16128892]
# [-0.02822879 -1.67764516 0.11141444 0.3220113 ]]
# [[ 0.22524372 0.62474921 -0.85503233 -1.03960523]
# [-0.76620689 0.56673047 0.85064753 -0.45158196]
# [ 1.47595418 2.23646462 1.5701758 0.10497519]]]
b_cond_2 = paddle.linalg.cond(b, p=2)
# b_cond_2.numpy() [3.30064451 2.51976252]
"""
def mat_norm(input, porder=1., axis=None):
"""
NOTE:
Calculate the matrix norm of a square matrix or batches of square matrices,
when porder is in (1, -1, inf, -inf)
"""
reduce_all = True if axis is None or axis == [] else False
axis = axis if axis != None and axis != [] else [0]
keepdim = False
if paddle.in_dynamic_mode():
abs_out = _C_ops.abs(input)
sum_out = _C_ops.reduce_sum(abs_out, 'dim', axis, 'keepdim',
keepdim, 'reduce_all', reduce_all)
if porder == 1 or porder == np.inf:
return _C_ops.reduce_max(sum_out, 'dim', [-1], 'keepdim',
keepdim, 'reduce_all', reduce_all)
if porder == -1 or porder == -np.inf:
return _C_ops.reduce_min(sum_out, 'dim', [-1], 'keepdim',
keepdim, 'reduce_all', reduce_all)
block = LayerHelper('norm', **locals())
abs_out = block.create_variable_for_type_inference(
dtype=block.input_dtype())
sum_out = block.create_variable_for_type_inference(
dtype=block.input_dtype())
out = block.create_variable_for_type_inference(
dtype=block.input_dtype())
block.append_op(
type='abs', inputs={'X': input}, outputs={'Out': abs_out})
block.append_op(
type='reduce_sum',
inputs={'X': abs_out},
outputs={'Out': sum_out},
attrs={'dim': axis,
'keep_dim': keepdim,
'reduce_all': reduce_all})
if porder == 1 or porder == np.inf:
block.append_op(
type='reduce_max',
inputs={'X': sum_out},
outputs={'Out': out},
attrs={
'dim': [-1],
'keep_dim': keepdim,
'reduce_all': reduce_all
})
if porder == -1 or porder == -np.inf:
block.append_op(
type='reduce_min',
inputs={'X': sum_out},
outputs={'Out': out},
attrs={
'dim': [-1],
'keep_dim': keepdim,
'reduce_all': reduce_all
})
return out
def fro_norm(input, porder=2, axis=[-1]):
"""
NOTE:
Calculate the frobenius norm of a square matrix or batches of square matrices.
"""
reduce_all = True if axis is None or axis == [] else False
keepdim = False
if paddle.in_dynamic_mode():
pow_out = _C_ops.pow(input, 'factor', porder)
sum_out_1 = _C_ops.reduce_sum(pow_out, 'dim', axis, 'keepdim',
keepdim, 'reduce_all', reduce_all)
sum_out_2 = _C_ops.reduce_sum(sum_out_1, 'dim', axis, 'keepdim',
keepdim, 'reduce_all', reduce_all)
return _C_ops.pow(sum_out_2, 'factor', float(1. / porder))
block = LayerHelper('norm', **locals())
pow_out = block.create_variable_for_type_inference(
dtype=block.input_dtype())
sum_out_1 = block.create_variable_for_type_inference(
dtype=block.input_dtype())
sum_out_2 = block.create_variable_for_type_inference(
dtype=block.input_dtype())
out = block.create_variable_for_type_inference(
dtype=block.input_dtype())
block.append_op(
type='pow',
inputs={'X': input},
outputs={'Out': pow_out},
attrs={'factor': porder})
block.append_op(
type='reduce_sum',
inputs={'X': pow_out},
outputs={'Out': sum_out_1},
attrs={'dim': axis,
'keep_dim': keepdim,
'reduce_all': reduce_all})
block.append_op(
type='reduce_sum',
inputs={'X': sum_out_1},
outputs={'Out': sum_out_2},
attrs={'dim': axis,
'keep_dim': keepdim,
'reduce_all': reduce_all})
block.append_op(
type='pow',
inputs={'X': sum_out_2},
outputs={'Out': out},
attrs={'factor': float(1. / porder)})
return out
def svd_norm(input, porder, axis=[-1]):
"""
NOTE:
Calculate the matrix norm, which is related to singular values, of a matrix
or batches of matrices, including nuclear norm, 2-norm and (-2)-norm.
"""
reduce_all = True if axis is None or axis == [] else False
keepdim = False
u, s, vh = svd(input, full_matrices=False)
if paddle.in_dynamic_mode():
if porder == "nuc":
return _C_ops.reduce_sum(s, 'dim', axis, 'keepdim', keepdim,
'reduce_all', reduce_all)
max_out = _C_ops.reduce_max(s, 'dim', axis, 'keepdim', keepdim,
'reduce_all', reduce_all)
min_out = _C_ops.reduce_min(s, 'dim', axis, 'keepdim', keepdim,
'reduce_all', reduce_all)
if porder == 2:
return _C_ops.elementwise_div(max_out, min_out, 'aixs', axis,
'use_mkldnn', False)
if porder == -2:
return _C_ops.elementwise_div(min_out, max_out, 'aixs', axis,
'use_mkldnn', False)
block = LayerHelper('norm', **locals())
out = block.create_variable_for_type_inference(
dtype=block.input_dtype())
if porder == "nuc":
block.append_op(
type='reduce_sum',
inputs={'X': s},
outputs={'Out': out},
attrs={
'dim': axis,
'keep_dim': keepdim,
'reduce_all': reduce_all
})
return out
max_out = block.create_variable_for_type_inference(
dtype=block.input_dtype())
min_out = block.create_variable_for_type_inference(
dtype=block.input_dtype())
block.append_op(
type='reduce_max',
inputs={'X': s},
outputs={'Out': max_out},
attrs={'dim': axis,
'keep_dim': keepdim,
'reduce_all': reduce_all})
block.append_op(
type='reduce_min',
inputs={'X': s},
outputs={'Out': min_out},
attrs={'dim': axis,
'keep_dim': keepdim,
'reduce_all': reduce_all})
if porder == 2:
block.append_op(
type='elementwise_div',
inputs={'X': max_out,
'Y': min_out},
outputs={'Out': out},
attrs={'aixs': axis,
'use_mkldnn': False})
return out
if porder == -2:
block.append_op(
type='elementwise_div',
inputs={'X': min_out,
'Y': max_out},
outputs={'Out': out},
attrs={'aixs': axis,
'use_mkldnn': False})
return out
def empty_tensor(input, shape):
if paddle.in_dynamic_mode():
return input.reshape(shape)
raise ValueError("only support x is nonempty tensor in static mode")
x_shape = list(x.shape)
    if not len(x_shape) >= 2:
        raise ValueError("input should be a matrix or batches of matrices, " +
                         "but the dimension of received input is {}".format(
                             len(x_shape)))
    if p is None:
p = 2
x_size = 0 if (0 in x_shape) else 1
if p in ("fro", "nuc", 1, -1, np.inf, -np.inf):
if x_shape[len(x_shape) - 1] == x_shape[len(x_shape) - 2]:
if x_size == 0:
return empty_tensor(x, x_shape[:-2])
x_inv = x.inverse()
if p == "fro":
return fro_norm(x) * fro_norm(x_inv)
if p == "nuc":
return svd_norm(x, p) * svd_norm(x_inv, p)
if p in (1, -1):
return mat_norm(
x, porder=p, axis=[-2]) * mat_norm(
x_inv, porder=p, axis=[-2])
if p in (np.inf, -np.inf):
return mat_norm(
x, porder=p, axis=[-1]) * mat_norm(
x_inv, porder=p, axis=[-1])
else:
raise ValueError("only support p is {} when input is a ".format(p) +
"square matrix or batches of square matrices")
elif p in (2, -2):
if x_size == 0:
return empty_tensor(x, x_shape[:-2])
return svd_norm(x, porder=p)
else:
raise ValueError(
"unsupported {} for p, only supporting ('fro', 'nuc', ".format(
p) + "1, -1, 2, -2, inf, -inf) or none")
def dot(x, y, name=None):
"""
This operator calculates inner product for vectors.
.. note::
Support 1-d and 2-d Tensor. When it is 2d, the first dimension of this matrix
is the batch dimension, which means that the vectors of multiple batches are dotted.
Parameters:
x(Tensor): 1-D or 2-D ``Tensor``. Its dtype should be ``float32``, ``float64``, ``int32``, ``int64``
        y(Tensor): 1-D or 2-D ``Tensor``. Its dtype should be ``float32``, ``float64``, ``int32``, ``int64``
name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name`
Returns:
Tensor: the calculated result Tensor.
Examples:
.. code-block:: python
import paddle
import numpy as np
x_data = np.random.uniform(0.1, 1, [10]).astype(np.float32)
y_data = np.random.uniform(1, 3, [10]).astype(np.float32)
x = paddle.to_tensor(x_data)
y = paddle.to_tensor(y_data)
z = paddle.dot(x, y)
print(z)
"""
op_type = 'dot'
# skip var type check in dygraph mode to improve efficiency
if paddle.in_dynamic_mode():
op = getattr(_C_ops, op_type)
return op(x, y)
assert x is not None, 'x cannot be None in {}'.format(op_type)
assert y is not None, 'y cannot be None in {}'.format(op_type)
check_variable_and_dtype(x, 'x', ['float32', 'float64', 'int32', 'int64'],
op_type)
check_variable_and_dtype(y, 'y', ['float32', 'float64', 'int32', 'int64'],
op_type)
helper = LayerHelper(op_type, **locals())
if name is None:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
else:
out = helper.create_variable(
name=name, dtype=x.dtype, persistable=False)
helper.append_op(
type="dot", inputs={'X': x,
'Y': y}, attrs={}, outputs={"Out": out})
return out
def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None):
"""
Estimate the covariance matrix of the input variables, given data and weights.
    A covariance matrix is a square matrix that indicates the covariance of each pair of variables in the input matrix.
    For example, for N-dimensional samples X=[x1,x2,…xN]T, the covariance matrix
    element Cij is the covariance of xi and xj. The element Cii is the variance of xi itself.
Parameters:
        x(Tensor): An N-D (N<=2) Tensor containing multiple variables and observations. By default, each row of x represents a variable. Also see rowvar below.
rowvar(Bool, optional): If rowvar is True (default), then each row represents a variable, with observations in the columns. Default: True
ddof(Bool, optional): If ddof=True will return the unbiased estimate, and ddof=False will return the simple average. Default: True
fweights(Tensor, optional): 1-D Tensor of integer frequency weights; The number of times each observation vector should be repeated. Default: None
        aweights(Tensor, optional): 1-D Tensor of observation vector weights. Larger weights indicate more important observations. Default: None
name(str, optional): Name of the output. Default is None. It's used to print debug info for developers. Details: :ref:`api_guide_Name`
Returns:
Tensor: The covariance matrix Tensor of the variables.
Examples:
.. code-block:: python
import paddle
xt = paddle.rand((3,4))
paddle.linalg.cov(xt)
'''
Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
[[0.07918842, 0.06127326, 0.01493049],
[0.06127326, 0.06166256, 0.00302668],
[0.01493049, 0.00302668, 0.01632146]])
'''
"""
op_type = 'cov'
if len(x.shape) > 2 or len(x.shape) < 1:
raise ValueError(
"Input(x) only support N-D (1<=N<=2) tensor in cov, but received "
"length of Input(input) is %s." % len(x.shape))
check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cov')
nx = x
if len(x.shape) == 1:
nx = x.reshape((1, -1))
if not rowvar and nx.shape[0] != 1:
nx = nx.t()
w = None
observation_num = nx.shape[1]
if fweights is not None:
w = fweights.astype(nx.dtype)
if len(w.shape) > 1:
raise ValueError(
"Input(fweights) only support N-D (N<=1) tensor in cov, but received "
"shape of Input(input) is %s." % len(fweights.shape))
if fweights.shape[0] != observation_num:
raise ValueError(
"The number of Input(fweights) should equal to x's dim[1]: {}, but received "
"size of Input(fweights) is {}.".format(observation_num,
fweights.shape[0]))
if fweights.min() < 0:
raise ValueError(
"The value of Input(fweights) cannot be negtive, but received "
"min of Input(fweights) is {}.".format(fweights.min()))
if not paddle.all(fweights == paddle.round(fweights.astype('float64'))):
raise ValueError("Input(fweights) must be integer ")
if aweights is not None:
aw = aweights.astype(nx.dtype)
if len(aw.shape) > 1:
raise ValueError(
"Input(aweights) only support N-D (N<=1) tensor in cov, but received "
"length of Input(input) is %s." % len(aweights.shape))
check_variable_and_dtype(aweights, 'dtype', ['float32', 'float64'],
'cov')
if aweights.shape[0] != observation_num:
raise ValueError(
"The number of Input(aweights) should equal to x's dim[1]: {}, but received "
"size of Input(aweights) is {}.".format(observation_num,
aweights.shape[0]))
if aweights.min() < 0:
raise ValueError(
"The value of Input(aweights) cannot be negtive, but received "
"min of Input(aweights) is {}.".format(aweights.min()))
if w is not None:
w = w * aw
else:
w = aw
w_sum = paddle.to_tensor(observation_num, dtype=nx.dtype)
if fweights is not None or aweights is not None:
w_sum = w.sum()
if w_sum.item() == 0:
raise ValueError("The sum of weights is zero, can't be normalized.")
if w is not None:
nx_w = nx * w
avg = (nx_w).sum(axis=1) / w_sum
else:
avg = nx.sum(axis=1) / w_sum
nx_w = nx
if w is not None and aweights is not None and ddof == True:
norm_factor = w_sum - (w * aweights).sum() / w_sum
else:
norm_factor = w_sum - ddof
if norm_factor <= 0:
norm_factor = paddle.to_tensor(0, dtype=nx.dtype)
nx = nx - avg.unsqueeze(1)
xxt = paddle.mm(nx, nx_w.t().conj())
cov = paddle.divide(xxt, norm_factor).squeeze()
return cov
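# Illustrative sketch (assumptions: dynamic mode, the default rowvar=True / ddof=True path,
# and no weights). In that case cov matches the textbook unbiased estimate built from the
# centered rows:
#
#     >>> import paddle
#     >>> x = paddle.rand((3, 5))
#     >>> centered = x - x.mean(axis=1, keepdim=True)
#     >>> manual = paddle.mm(centered, centered.t()) / (x.shape[1] - 1)
#     >>> paddle.allclose(paddle.linalg.cov(x), manual)    # expected: True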
def t(input, name=None):
"""
Transpose <=2-D tensor.
    0-D and 1-D tensors are returned as they are. For a 2-D tensor, this is equivalent to
    the paddle.transpose function with perm set to [1, 0].
Args:
input (Tensor): The input Tensor. It is a N-D (N<=2) Tensor of data types float16, float32, float64, int32.
name(str, optional): The default value is None. Normally there is no need for
user to set this property. For more information, please refer to :ref:`api_guide_Name`
Returns:
Tensor: A transposed n-D Tensor, with data type being float16, float32, float64, int32, int64.
For Example:
.. code-block:: text
# Example 1 (0-D tensor)
x = tensor([0.79])
paddle.t(x) = tensor([0.79])
# Example 2 (1-D tensor)
x = tensor([0.79, 0.84, 0.32])
paddle.t(x) = tensor([0.79, 0.84, 0.32])
# Example 3 (2-D tensor)
            x = tensor([[0.79, 0.84, 0.32],
                        [0.64, 0.14, 0.57]])
            paddle.t(x) = tensor([[0.79, 0.64],
                                  [0.84, 0.14],
                                  [0.32, 0.57]])
Examples:
.. code-block:: python
import paddle
x = paddle.ones(shape=[2, 3], dtype='int32')
x_transposed = paddle.t(x)
print(x_transposed.shape)
# [3, 2]
"""
if len(input.shape) > 2:
raise ValueError(
"Input(input) only support N-D (N<=2) tensor, but received "
"length of Input(input) is %s. Perhaps you can use paddle."
"tensor.transpose() instead." % len(input.shape))
if paddle.in_dynamic_mode():
if len(input.shape) == 1:
return input
# 2-D tensor
perm = [1, 0]
out, _ = _C_ops.transpose2(input, 'axis', perm)
return out
check_variable_and_dtype(
input, 'input', ['float16', 'float32', 'float64', 'int32',
'int64'], 'transpose')
helper = LayerHelper('t', **locals())
out = helper.create_variable_for_type_inference(input.dtype)
input_shape = helper.create_variable_for_type_inference(input.dtype)
if len(input.shape) == 1:
out = input
else:
helper.append_op(
type='transpose2',
inputs={'X': [input]},
outputs={'Out': [out],
'XShape': [input_shape]},
attrs={'axis': [1, 0]})
return out
def cross(x, y, axis=None, name=None):
"""
Computes the cross product between two tensors along an axis.
Inputs must have the same shape, and the length of their axes should be equal to 3.
If `axis` is not given, it defaults to the first axis found with the length 3.
Args:
x (Tensor): The first input tensor.
y (Tensor): The second input tensor.
axis (int, optional): The axis along which to compute the cross product. It defaults to the first axis found with the length 3.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor. A Tensor with same data type as `x`.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[1.0, 1.0, 1.0],
[2.0, 2.0, 2.0],
[3.0, 3.0, 3.0]])
y = paddle.to_tensor([[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0],
[1.0, 1.0, 1.0]])
z1 = paddle.cross(x, y)
# [[-1. -1. -1.]
# [ 2. 2. 2.]
# [-1. -1. -1.]]
z2 = paddle.cross(x, y, axis=1)
# [[0. 0. 0.]
# [0. 0. 0.]
# [0. 0. 0.]]
"""
if in_dygraph_mode():
return _C_ops.final_state_cross(x, y, axis)
else:
if _in_legacy_dygraph():
if axis is not None:
return _C_ops.cross(x, y, 'dim', axis)
else:
return _C_ops.cross(x, y)
else:
helper = LayerHelper("cross", **locals())
out = helper.create_variable_for_type_inference(x.dtype)
attrs = dict()
attrs['dim'] = axis
helper.append_op(
type='cross',
inputs={'X': x,
'Y': y},
outputs={'Out': out},
attrs=attrs)
return out
def cholesky(x, upper=False, name=None):
r"""
Computes the Cholesky decomposition of one symmetric positive-definite
    matrix or batches of symmetric positive-definite matrices.
If `upper` is `True`, the decomposition has the form :math:`A = U^{T}U` ,
and the returned matrix :math:`U` is upper-triangular. Otherwise, the
decomposition has the form :math:`A = LL^{T}` , and the returned matrix
:math:`L` is lower-triangular.
Args:
x (Tensor): The input tensor. Its shape should be `[*, M, M]`,
where * is zero or more batch dimensions, and matrices on the
inner-most 2 dimensions all should be symmetric positive-definite.
Its data type should be float32 or float64.
upper (bool): The flag indicating whether to return upper or lower
triangular matrices. Default: False.
Returns:
Tensor: A Tensor with same shape and data type as `x`. It represents \
triangular matrices generated by Cholesky decomposition.
Examples:
.. code-block:: python
import paddle
import numpy as np
a = np.random.rand(3, 3)
a_t = np.transpose(a, [1, 0])
x_data = np.matmul(a, a_t) + 1e-03
x = paddle.to_tensor(x_data)
out = paddle.linalg.cholesky(x, upper=False)
print(out)
# [[1.190523 0. 0. ]
# [0.9906703 0.27676893 0. ]
# [1.25450498 0.05600871 0.06400121]]
"""
if paddle.in_dynamic_mode():
return _C_ops.cholesky(x, "upper", upper)
check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky')
check_type(upper, 'upper', bool, 'cholesky')
helper = LayerHelper('cholesky', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='cholesky',
inputs={'X': [x]},
outputs={'Out': out},
attrs={'upper': upper})
return out
def matrix_rank(x, tol=None, hermitian=False, name=None):
r"""
Computes the rank of a matrix.
The rank of a matrix is the number of singular values that are greater than the specified `tol` threshold when hermitian=False,
or the number of eigenvalues in absolute value that are greater than the specified `tol` threshold when hermitian=True.
Args:
x (Tensor): The input tensor. Its shape should be `[..., m, n]`, where `...` is zero or more batch dimensions. If `x` is a batch
of matrices then the output has the same batch dimensions. The data type of `x` should be float32 or float64.
tol (float,Tensor,optional): the tolerance value. Default: None. If `tol` is not specified, and `sigma` is the largest
singular value (or eigenvalues in absolute value), and `eps` is the epsilon value for the dtype of `x`, then `tol` is computed
with formula `tol=sigma * max(m,n) * eps`. Note that if `x` is a batch of matrices, `tol` is computed this way for every batch.
        hermitian (bool,optional): indicates whether `x` is Hermitian. Default: False. When hermitian=True, `x` is assumed to be Hermitian,
            enabling a more efficient method for finding eigenvalues, but `x` is not checked inside the function. Instead, only the lower
            triangular part of the matrix is used in the computation.
name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: Rank of tensor x.
Examples:
.. code-block:: python
import paddle
a = paddle.eye(10)
b = paddle.linalg.matrix_rank(a)
print(b)
# b = [10]
c = paddle.ones(shape=[3, 4, 5, 5])
d = paddle.linalg.matrix_rank(c, tol=0.01, hermitian=True)
print(d)
# d = [[1, 1, 1, 1],
# [1, 1, 1, 1],
# [1, 1, 1, 1]]
"""
if paddle.in_dynamic_mode():
if tol is None:
tol_tensor = None
tol_attr = 0.0
use_default_tol = True
elif isinstance(tol, Variable):
if tol.dtype != x.dtype:
tol_tensor = cast(tol, x.dtype)
else:
tol_tensor = tol
tol_attr = 0.0
use_default_tol = False
else:
tol_tensor = None
tol_attr = float(tol)
use_default_tol = False
return _C_ops.matrix_rank(x, tol_tensor, "tol", tol_attr, 'hermitian',
hermitian, 'use_default_tol', use_default_tol)
inputs = {}
attrs = {}
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'matrix_rank')
inputs['X'] = x
if tol is None:
attrs['use_default_tol'] = True
elif isinstance(tol, Variable):
check_variable_and_dtype(tol, 'tol', ['float32'], 'matrix_rank')
attrs['use_default_tol'] = False
if tol.dtype != x.dtype:
inputs['TolTensor'] = cast(tol, x.dtype)
else:
inputs['TolTensor'] = tol
else:
check_type(tol, 'tol', float, 'matrix_rank')
attrs['use_default_tol'] = False
attrs['tol'] = tol
check_type(hermitian, 'hermitian', bool, 'matrix_rank')
attrs['hermitian'] = hermitian
helper = LayerHelper('matrix_rank', **locals())
out = helper.create_variable_for_type_inference(dtype='int32')
helper.append_op(
type='matrix_rank', inputs=inputs, outputs={'Out': out}, attrs=attrs)
return out
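# Illustrative sketch (assumptions: dynamic mode and the default tol). The rank of the
# identity matrix equals its size, and zeroing out one of its rows removes one nonzero
# singular value, so the rank drops by one:
#
#     >>> import paddle
#     >>> a = paddle.eye(4)
#     >>> paddle.linalg.matrix_rank(a)      # expected: 4
#     >>> a2 = paddle.concat([a[:3], paddle.zeros([1, 4])])
#     >>> paddle.linalg.matrix_rank(a2)     # expected: 3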
def bmm(x, y, name=None):
"""
Applies batched matrix multiplication to two tensors.
    Both of the two input tensors must be three-dimensional and share the same batch size.
    If x is a (b, m, k) tensor, y is a (b, k, n) tensor, the output will be a (b, m, n) tensor.
Args:
x (Tensor): The input Tensor.
y (Tensor): The input Tensor.
name(str|None): A name for this layer(optional). If set None, the layer
will be named automatically.
Returns:
Tensor: The product Tensor.
Examples:
.. code-block:: python
import paddle
# In imperative mode:
# size x: (2, 2, 3) and y: (2, 3, 2)
x = paddle.to_tensor([[[1.0, 1.0, 1.0],
[2.0, 2.0, 2.0]],
[[3.0, 3.0, 3.0],
[4.0, 4.0, 4.0]]])
y = paddle.to_tensor([[[1.0, 1.0],[2.0, 2.0],[3.0, 3.0]],
[[4.0, 4.0],[5.0, 5.0],[6.0, 6.0]]])
out = paddle.bmm(x, y)
#output size: (2, 2, 2)
#output value:
#[[[6.0, 6.0],[12.0, 12.0]],[[45.0, 45.0],[60.0, 60.0]]]
out_np = out.numpy()
"""
x_shape = x.shape
y_shape = y.shape
if not len(x_shape) == len(y_shape) == 3:
raise ValueError(
"x and y should be 3-dimensional. But received x's dimention: {}, y's dimention: {}".
format(x_shape, y_shape))
if x_shape[2] != y_shape[1]:
raise ValueError(
"x's width must be equal with y's height. But received x's shape: {}, y's shape: {}".
format(x_shape, y_shape))
if x_shape[0] != y_shape[0]:
raise ValueError(
"x's batch (shape[0]) must be equal with y's batch (shape[0]). But received x's shape: {}, y's shape: {}".
format(x_shape, y_shape))
if paddle.in_dynamic_mode():
return _C_ops.bmm(x, y)
helper = LayerHelper('bmm', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='bmm', inputs={'X': x, 'Y': y}, outputs={'Out': out})
return out
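# Illustrative sketch (assumption: dynamic mode). bmm is equivalent to applying matmul to each
# pair of matrices along the batch dimension:
#
#     >>> import paddle
#     >>> x = paddle.rand([2, 2, 3])
#     >>> y = paddle.rand([2, 3, 4])
#     >>> batched = paddle.bmm(x, y)
#     >>> looped = paddle.stack([paddle.matmul(x[i], y[i]) for i in range(2)])
#     >>> paddle.allclose(batched, looped)    # expected: True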
def histogram(input, bins=100, min=0, max=0, name=None):
"""
Computes the histogram of a tensor. The elements are sorted into equal width bins between min and max.
If min and max are both zero, the minimum and maximum values of the data are used.
Args:
input (Tensor): A Tensor(or LoDTensor) with shape :math:`[N_1, N_2,..., N_k]` . The data type of the input Tensor
should be float32, float64, int32, int64.
bins (int): number of histogram bins
min (int): lower end of the range (inclusive)
max (int): upper end of the range (inclusive)
Returns:
Tensor: data type is int64, shape is (nbins,).
Examples:
.. code-block:: python
import paddle
inputs = paddle.to_tensor([1, 2, 1])
result = paddle.histogram(inputs, bins=4, min=0, max=3)
print(result) # [0, 2, 1, 0]
"""
if paddle.in_dynamic_mode():
return _C_ops.histogram(input, "bins", bins, "min", min, "max", max)
helper = LayerHelper('histogram', **locals())
check_variable_and_dtype(
input, 'X', ['int32', 'int64', 'float32', 'float64'], 'histogram')
out = helper.create_variable_for_type_inference(VarDesc.VarType.INT64)
helper.append_op(
type='histogram',
inputs={'X': input},
outputs={'Out': out},
attrs={'bins': bins,
'min': min,
'max': max})
return out
def bincount(x, weights=None, minlength=0, name=None):
"""
Computes frequency of each value in the input tensor.
Args:
        x (Tensor): A Tensor with non-negative integers. Should be 1-D tensor.
weights (Tensor, optional): Weight for each value in the input tensor. Should have the same shape as input. Default is None.
minlength (int, optional): Minimum number of bins. Should be non-negative integer. Default is 0.
name(str, optional): The default value is None. Normally there is no need for user to set this
property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: The tensor of frequency.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([1, 2, 1, 4, 5])
result1 = paddle.bincount(x)
print(result1) # [0, 2, 1, 0, 1, 1]
w = paddle.to_tensor([2.1, 0.4, 0.1, 0.5, 0.5])
result2 = paddle.bincount(x, weights=w)
print(result2) # [0., 2.19999981, 0.40000001, 0., 0.50000000, 0.50000000]
"""
if x.dtype not in [paddle.int32, paddle.int64]:
raise TypeError("Elements in Input(x) should all be integers")
if paddle.in_dynamic_mode():
return _C_ops.bincount(x, weights, "minlength", minlength)
helper = LayerHelper('bincount', **locals())
check_variable_and_dtype(x, 'X', ['int32', 'int64'], 'bincount')
if weights is not None:
check_variable_and_dtype(weights, 'Weights',
['int32', 'int64', 'float32', 'float64'],
'bincount')
out = helper.create_variable_for_type_inference(dtype=weights.dtype)
else:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='bincount',
inputs={'X': x,
'Weights': weights},
outputs={'Out': out},
attrs={'minlength': minlength})
return out
def mv(x, vec, name=None):
"""
Performs a matrix-vector product of the matrix x and the vector vec.
Args:
        x (Tensor): A tensor with shape :math:`[M, N]` , The data type of the input Tensor x
            should be one of float32, float64.
        vec (Tensor): A tensor with shape :math:`[N]` , The data type of the input Tensor vec
            should be one of float32, float64.
name(str, optional): The default value is None. Normally there is no need for user to set this
property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
        Tensor: The matrix-vector product of x and vec.
Examples:
.. code-block:: python
# x: [M, N], vec: [N]
# paddle.mv(x, vec) # out: [M]
import numpy as np
import paddle
x_data = np.array([[2, 1, 3], [3, 0, 1]]).astype("float64")
x = paddle.to_tensor(x_data)
vec_data = np.array([3, 5, 1])
vec = paddle.to_tensor(vec_data).astype("float64")
out = paddle.mv(x, vec)
"""
if in_dygraph_mode():
return _C_ops.final_state_mv(x, vec)
else:
if _in_legacy_dygraph():
out = _C_ops.mv(x, vec)
return out
else:
def __check_input(x, vec):
var_names = {'x': x, 'vec': vec}
for name, val in var_names.items():
check_variable_and_dtype(val, name, ['float32', 'float64'],
'mv')
x_shape = list(x.shape)
vec_shape = list(vec.shape)
if len(x_shape) != 2:
raise ValueError(
"x should be 2-dimensional. But received x's dimention: {}".
format(x_shape))
if len(vec_shape) != 1:
raise ValueError(
"vec should be 1-dimensional. But received vec's dimention: {}".
format(vec_shape))
__check_input(x, vec)
helper = LayerHelper('mv', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='mv', inputs={'X': x,
'Vec': vec}, outputs={'Out': out})
return out
def det(x, name=None):
"""
Calculates determinant value of a square matrix or batches of square matrices.
Args:
        x (Tensor): the input matrix of size `(n, n)` or the batch of matrices of size
            `(*, n, n)` where `*` is one or more batch dimensions.
Returns:
y (Tensor):the determinant value of a square matrix or batches of square matrices.
Examples:
.. code-block:: python
import paddle
x = paddle.randn([3,3,3])
A = paddle.linalg.det(x)
print(A)
# [ 0.02547996, 2.52317095, -6.15900707])
"""
if paddle.in_dynamic_mode():
return _C_ops.determinant(x)
check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'det')
input_shape = list(x.shape)
    assert len(input_shape) >= 2, \
        "The x must be at least 2-dimensional, " \
        "but received Input x's dimension: %s.\n" % \
        len(input_shape)
    assert (input_shape[-1] == input_shape[-2]), \
        "Expected a square matrix, " \
        "but received a %s by %s matrix.\n" % \
        (input_shape[-2], input_shape[-1])
helper = LayerHelper('determinant', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='determinant', inputs={'Input': [x]}, outputs={'Out': [out]})
return out
def slogdet(x, name=None):
"""
    Calculates the sign and natural logarithm of the absolute value of the determinant of a square matrix or of batches of square matrices.
    The determinant can be computed with ``sign * exp(logabsdet)``.
    Supports input of float32 and float64.
    Note that for matrices that have zero determinant, this returns ``(0, -inf)``.
Args:
x (Tensor): the batch of matrices of size :math:`(*, n, n)`
where math:`*` is one or more batch dimensions.
Returns:
y (Tensor): A tensor containing the sign of the determinant and the natural logarithm
of the absolute value of determinant, respectively.
Examples:
.. code-block:: python
import paddle
x = paddle.randn([3,3,3])
A = paddle.linalg.slogdet(x)
print(A)
# [[ 1. , 1. , -1. ],
# [-0.98610914, -0.43010661, -0.10872950]])
"""
if paddle.in_dynamic_mode():
return _C_ops.slogdeterminant(x)
check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet')
input_shape = list(x.shape)
    assert len(input_shape) >= 2, \
        "The x must be at least 2-dimensional, " \
        "but received Input x's dimension: %s.\n" % \
        len(input_shape)
    assert (input_shape[-1] == input_shape[-2]), \
        "Expected a square matrix, " \
        "but received a %s by %s matrix.\n" % \
        (input_shape[-2], input_shape[-1])
helper = LayerHelper('slogdeterminant', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='slogdeterminant', inputs={'Input': [x]}, outputs={'Out': [out]})
return out
def svd(x, full_matrices=False, name=None):
r"""
Computes the singular value decomposition of one matrix or a batch of regular matrices.
    Let :math:`X` be the input matrix or a batch of input matrices, the output should satisfy:
.. math::
X = U * diag(S) * VT
Args:
        x (Tensor): The input tensor. Its shape should be `[..., N, M]`,
            where `...` is zero or more batch dimensions. N and M can be arbitrary
            positive numbers. Note that if x is a singular matrix, the gradient is numerically
            unstable. The data type of x should be float32 or float64.
        full_matrices (bool): A flag to control the behavior of svd.
            If full_matrices = True, svd op will compute full U and V matrices,
            which means shape of U is `[..., N, N]`, shape of V is `[..., M, M]`. K = min(M, N).
            If full_matrices = False, svd op will use an economical method to store U and V,
            which means shape of U is `[..., N, K]`, shape of V is `[..., M, K]`. K = min(M, N).
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
        Tuple of 3 tensors: (U, S, VH). VH is the conjugate transpose of V. S is the vector of singular values of the matrices, with shape `[..., K]`
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[1.0, 2.0], [1.0, 3.0], [4.0, 6.0]]).astype('float64')
x = x.reshape([3, 2])
u, s, vh = paddle.linalg.svd(x)
print (u)
#U = [[ 0.27364809, -0.21695147 ],
# [ 0.37892198, -0.87112408 ],
# [ 0.8840446 , 0.44053933 ]]
print (s)
#S = [8.14753743, 0.78589688]
print (vh)
#VT= [[ 0.51411221, 0.85772294],
# [ 0.85772294, -0.51411221]]
# one can verify : U * S * VT == X
# U * UH == I
# V * VH == I
"""
if paddle.in_dynamic_mode():
return _C_ops.svd(x, 'full_matrices', full_matrices)
check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'svd')
check_type(full_matrices, 'full_matrices', bool, 'svd')
helper = LayerHelper('svd', **locals())
u = helper.create_variable_for_type_inference(dtype=x.dtype)
vh = helper.create_variable_for_type_inference(dtype=x.dtype)
s = helper.create_variable_for_type_inference(dtype=x.dtype)
attrs = dict()
attrs['full_matrices'] = full_matrices
helper.append_op(
type='svd',
inputs={'X': [x]},
outputs={'U': u,
'VH': vh,
'S': s},
attrs=attrs, )
return u, s, vh
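# Illustrative sketch (assumption: dynamic mode). The factors returned by svd can be used to
# rebuild the input, which is a quick check of the `X = U * diag(S) * VT` relation stated in
# the docstring above:
#
#     >>> import paddle
#     >>> x = paddle.to_tensor([[1.0, 2.0], [1.0, 3.0], [4.0, 6.0]], dtype='float64')
#     >>> u, s, vh = paddle.linalg.svd(x)
#     >>> rebuilt = paddle.matmul(paddle.matmul(u, paddle.diag(s)), vh)
#     >>> paddle.allclose(rebuilt, x)    # expected: True (up to floating point error)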
def matrix_power(x, n, name=None):
r"""
Computes the n-th power of a square matrix or a batch of square matrices.
    Let :math:`X` be a square matrix or a batch of square matrices, :math:`n` be
an exponent, the equation should be:
.. math::
Out = X ^ {n}
Specifically,
- If `n > 0`, it returns the matrix or a batch of matrices raised to the power
of `n`.
- If `n = 0`, it returns the identity matrix or a batch of identity matrices.
- If `n < 0`, it returns the inverse of each matrix (if invertible) raised to
the power of `abs(n)`.
Args:
x (Tensor): A square matrix or a batch of square matrices to be raised
to power `n`. Its shape should be `[*, M, M]`, where `*` is zero or
more batch dimensions. Its data type should be float32 or float64.
n (int): The exponent. It can be any positive, negative integer or zero.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: The n-th power of the matrix (or the batch of matrices) `x`. Its
data type should be the same as that of `x`.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[1, 2, 3],
[1, 4, 9],
[1, 8, 27]], dtype='float64')
print(paddle.linalg.matrix_power(x, 2))
# [[6. , 34. , 102.],
# [14. , 90. , 282.],
# [36. , 250., 804.]]
print(paddle.linalg.matrix_power(x, 0))
# [[1., 0., 0.],
# [0., 1., 0.],
# [0., 0., 1.]]
print(paddle.linalg.matrix_power(x, -2))
# [[ 12.91666667, -12.75000000, 2.83333333 ],
# [-7.66666667 , 8. , -1.83333333 ],
# [ 1.80555556 , -1.91666667 , 0.44444444 ]]
"""
if paddle.in_dynamic_mode():
return _C_ops.matrix_power(x, "n", n)
check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'matrix_power')
check_type(n, 'n', int, 'matrix_power')
helper = LayerHelper('matrix_power', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='matrix_power',
inputs={'X': x},
outputs={'Out': out},
attrs={'n': n})
return out
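# Illustrative sketch (assumptions: dynamic mode and an invertible input; the tensor below is
# made up for illustration). For a negative exponent, matrix_power is the inverse raised to
# abs(n), so n = -1 should agree with paddle.linalg.inv:
#
#     >>> import paddle
#     >>> x = paddle.to_tensor([[2.0, 1.0], [1.0, 1.0]], dtype='float64')
#     >>> paddle.allclose(paddle.linalg.matrix_power(x, -1), paddle.linalg.inv(x))
#     # expected: True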
def qr(x, mode="reduced", name=None):
r"""
    Computes the QR decomposition of one matrix or batches of matrices (backward is unsupported now).
Args:
x (Tensor): The input tensor. Its shape should be `[..., M, N]`,
where ... is zero or more batch dimensions. M and N can be arbitrary
            positive numbers. The data type of x should be float32 or float64.
mode (str, optional): A flag to control the behavior of qr, the default is "reduced".
Suppose x's shape is `[..., M, N]` and denoting `K = min(M, N)`:
If mode = "reduced", qr op will return reduced Q and R matrices,
which means Q's shape is `[..., M, K]` and R's shape is `[..., K, N]`.
If mode = "complete", qr op will return complete Q and R matrices,
which means Q's shape is `[..., M, M]` and R's shape is `[..., M, N]`.
If mode = "r", qr op will only return reduced R matrix, which means
R's shape is `[..., K, N]`.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
If mode = "reduced" or mode = "complete", qr will return a two tensor-tuple, which represents Q and R.
If mode = "r", qr will return a tensor which represents R.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64')
q, r = paddle.linalg.qr(x)
print (q)
print (r)
# Q = [[-0.16903085, 0.89708523],
# [-0.50709255, 0.27602622],
# [-0.84515425, -0.34503278]])
# R = [[-5.91607978, -7.43735744],
# [ 0. , 0.82807867]])
# one can verify : X = Q * R ;
"""
if paddle.in_dynamic_mode():
q, r = _C_ops.qr(x, 'mode', mode)
if mode == "r":
return r
else:
return q, r
check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'qr')
check_type(mode, 'mode', str, 'qr')
helper = LayerHelper('qr', **locals())
q = helper.create_variable_for_type_inference(dtype=x.dtype)
r = helper.create_variable_for_type_inference(dtype=x.dtype)
attrs = dict()
attrs['mode'] = mode
helper.append_op(
type='qr', inputs={'X': [x]}, outputs={'Q': q,
'R': r}, attrs=attrs)
if mode == "r":
return r
else:
return q, r
def lu(x, pivot=True, get_infos=False, name=None):
r"""
Computes the LU factorization of an N-D(N>=2) matrix x.
    Returns the LU factorization (in-place x) and Pivots. The lower triangular matrix L and
    the upper triangular matrix U are combined into a single LU matrix.
    Pivoting is done if pivot is set to True.
    The permutation matrix P can be reconstructed from the pivots:
# ones = eye(rows) #eye matrix of rank rows
# for i in range(cols):
# swap(ones[i], ones[pivots[i]])
# return ones
Args:
        x (Tensor): the tensor to factor of N dimensions (N>=2).
pivot (bool, optional): controls whether pivoting is done. Default: True.
get_infos (bool, optional): if set to True, returns an info IntTensor. Default: False.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
factorization (Tensor): LU matrix, the factorization of input X.
pivots (IntTensor): the pivots of size(∗(N-2), min(m,n)). `pivots` stores all the
            intermediate transpositions of rows. The final permutation `perm` can be
            reconstructed from them; see the example above for details.
infos (IntTensor, optional): if `get_infos` is `True`, this is a tensor of size (∗(N-2))
where non-zero values indicate whether factorization for the matrix or each minibatch
has succeeded or failed.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64')
lu,p,info = paddle.linalg.lu(x, get_infos=True)
# >>> lu:
# Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
# [[5. , 6. ],
# [0.20000000, 0.80000000],
# [0.60000000, 0.50000000]])
# >>> p
# Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True,
# [3, 3])
# >>> info
# Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True,
# 0)
P,L,U = paddle.linalg.lu_unpack(lu,p)
# >>> P
# (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
# [[0., 1., 0.],
# [0., 0., 1.],
# [1., 0., 0.]]),
# >>> L
# Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
# [[1. , 0. ],
# [0.20000000, 1. ],
# [0.60000000, 0.50000000]]),
# >>> U
# Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
# [[5. , 6. ],
# [0. , 0.80000000]]))
# one can verify : X = P @ L @ U ;
"""
if paddle.in_dynamic_mode():
LU, Piv, Info = _C_ops.lu(x, 'pivots', pivot)
if get_infos:
return LU, Piv, Info
else:
return LU, Piv
check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu')
helper = LayerHelper('lu', **locals())
lu = helper.create_variable_for_type_inference(dtype=x.dtype)
p = helper.create_variable_for_type_inference(dtype='int')
info = helper.create_variable_for_type_inference(dtype='int')
attrs = dict()
attrs['pivots'] = pivot
helper.append_op(
type='lu',
inputs={'X': x},
outputs={'Out': lu,
'Pivots': p,
'Infos': info},
attrs=attrs)
if get_infos:
return lu, p, info
else:
return lu, p
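# Illustrative sketch (assumption: dynamic mode, reusing the input from the docstring above).
# The factorization can be checked by unpacking it with lu_unpack (defined below) and
# re-multiplying the factors:
#
#     >>> import paddle
#     >>> x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype='float64')
#     >>> lu, p = paddle.linalg.lu(x)
#     >>> P, L, U = paddle.linalg.lu_unpack(lu, p)
#     >>> paddle.allclose(paddle.matmul(P, paddle.matmul(L, U)), x)    # expected: True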
def lu_unpack(x, y, unpack_ludata=True, unpack_pivots=True, name=None):
r"""
    Unpack L, U and P from the result of paddle.linalg.lu into separate matrix tensors.
    Unpack the L and U matrices from LU, and unpack the permutation matrix P from Pivots.
    The permutation matrix P can be reconstructed from the pivots:
# ones = eye(rows) #eye matrix of rank rows
# for i in range(cols):
# swap(ones[i], ones[pivots[i]])
Args:
x (Tensor): The LU tensor get from paddle.linalg.lu, which is combined by L and U.
        y (Tensor): Pivots obtained from paddle.linalg.lu.
        unpack_ludata (bool, optional): whether to unpack L and U from x. Default: True.
        unpack_pivots (bool, optional): whether to unpack the permutation matrix P from Pivots. Default: True.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
P (Tensor): Permutation matrix P of lu factorization.
L (Tensor): The lower triangular matrix tensor of lu factorization.
U (Tensor): The upper triangular matrix tensor of lu factorization.
Examples:
.. code-block:: python
import paddle
x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]).astype('float64')
lu,p,info = paddle.linalg.lu(x, get_infos=True)
# >>> lu:
# Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
# [[5. , 6. ],
# [0.20000000, 0.80000000],
# [0.60000000, 0.50000000]])
# >>> p
# Tensor(shape=[2], dtype=int32, place=CUDAPlace(0), stop_gradient=True,
# [3, 3])
# >>> info
# Tensor(shape=[], dtype=int32, place=CUDAPlace(0), stop_gradient=True,
# 0)
P,L,U = paddle.linalg.lu_unpack(lu,p)
# >>> P
# (Tensor(shape=[3, 3], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
# [[0., 1., 0.],
# [0., 0., 1.],
# [1., 0., 0.]]),
# >>> L
# Tensor(shape=[3, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
# [[1. , 0. ],
# [0.20000000, 1. ],
# [0.60000000, 0.50000000]]),
# >>> U
# Tensor(shape=[2, 2], dtype=float64, place=CUDAPlace(0), stop_gradient=True,
# [[5. , 6. ],
# [0. , 0.80000000]]))
# one can verify : X = P @ L @ U ;
"""
if paddle.in_dynamic_mode():
P, L, U = _C_ops.lu_unpack(x, y, 'unpack_ludata', unpack_ludata,
'unpack_pivots', unpack_pivots)
return P, L, U
check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu_unpack')
helper = LayerHelper('lu_unpack', **locals())
p = helper.create_variable_for_type_inference(dtype=x.dtype)
l = helper.create_variable_for_type_inference(dtype=x.dtype)
u = helper.create_variable_for_type_inference(dtype=x.dtype)
attrs = dict()
attrs['unpack_ludata'] = unpack_ludata
attrs['unpack_pivots'] = unpack_pivots
helper.append_op(
type='lu_unpack',
inputs={'X': x,
'Pivots': y},
outputs={'Pmat': p,
'L': l,
'U': u},
attrs=attrs)
return p, l, u
def eig(x, name=None):
"""
This API performs the eigenvalue decomposition of a square matrix or a batch of square matrices.
.. note::
If the matrix is a Hermitian or a real symmetric matrix, please use :ref:`paddle.linalg.eigh` instead, which is much faster.
If only eigenvalues is needed, please use :ref:`paddle.linalg.eigvals` instead.
If the matrix is of any shape, please use :ref:`paddle.linalg.svd`.
This API is only supported on CPU device.
The output datatype is always complex for both real and complex input.
Args:
x (Tensor): A tensor with shape math:`[*, N, N]`, The data type of the x should be one of ``float32``,
            ``float64``, ``complex64`` or ``complex128``.
name (str, optional): The default value is `None`. Normally there is no need for user to set
this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
        Eigenvalues(Tensors): A tensor with shape math:`[*, N]` refers to the eigenvalues.
        Eigenvectors(Tensors): A tensor with shape math:`[*, N, N]` refers to the eigenvectors.
Examples:
.. code-block:: python
import paddle
import numpy as np
paddle.device.set_device("cpu")
x_data = np.array([[1.6707249, 7.2249975, 6.5045543],
[9.956216, 8.749598, 6.066444 ],
[4.4251957, 1.7983172, 0.370647 ]]).astype("float32")
x = paddle.to_tensor(x_data)
w, v = paddle.linalg.eig(x)
            print(w)
            # Tensor(shape=[3], dtype=complex128, place=CPUPlace, stop_gradient=False,
            #       [ (16.50471283351188+0j)  , (-5.5034820550763515+0j) ,
            #         (-0.21026087843552282+0j)])
            print(v)
            # Tensor(shape=[3, 3], dtype=complex128, place=CPUPlace, stop_gradient=False,
            #       [[(-0.5061363550800655+0j) , (-0.7971760990842826+0j) ,
            #         (0.18518077798279986+0j)],
            #        [(-0.8308237755993192+0j) , (0.3463813401919749+0j) ,
            #         (-0.6837005269141947+0j) ],
            #        [(-0.23142567697893396+0j), (0.4944999840400175+0j) ,
            #         (0.7058765252952796+0j)  ]])
"""
if paddle.in_dynamic_mode():
w, v = _C_ops.eig(x)
return w, v
check_variable_and_dtype(
x, 'X', ['float32', 'float64', 'complex64', 'complex128'], 'eig')
helper = LayerHelper('eig', **locals())
w = helper.create_variable_for_type_inference(x.dtype)
v = helper.create_variable_for_type_inference(x.dtype)
inputs = {'X': x}
outputs = {'Eigenvalues': w, 'Eigenvectors': v}
helper.append_op(type='eig', inputs=inputs, outputs=outputs)
return w, v
def eigvals(x, name=None):
"""
Compute the eigenvalues of one or more general matrices.
Warning:
        The gradient kernel of this operator has not been developed yet.
If you need back propagation through this operator, please replace it with paddle.linalg.eig.
Args:
x (Tensor): A square matrix or a batch of square matrices whose eigenvalues will be computed.
Its shape should be `[*, M, M]`, where `*` is zero or more batch dimensions.
Its data type should be float32, float64, complex64, or complex128.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: A tensor containing the unsorted eigenvalues which has the same batch dimensions with `x`.
The eigenvalues are complex-valued even when `x` is real.
Examples:
.. code-block:: python
import paddle
paddle.set_device("cpu")
paddle.seed(1234)
x = paddle.rand(shape=[3, 3], dtype='float64')
# [[0.02773777, 0.93004224, 0.06911496],
# [0.24831591, 0.45733623, 0.07717843],
# [0.48016702, 0.14235102, 0.42620817]])
print(paddle.linalg.eigvals(x))
# [(-0.27078833542132674+0j), (0.29962280156230725+0j), (0.8824477020120244+0j)] #complex128
"""
check_variable_and_dtype(x, 'dtype',
['float32', 'float64', 'complex64',
'complex128'], 'eigvals')
x_shape = list(x.shape)
if len(x_shape) < 2:
raise ValueError(
"The dimension of Input(x) should be at least 2, but received x's dimention = {}, x's shape = {}".
format(len(x_shape), x_shape))
if x_shape[-1] != x_shape[-2]:
raise ValueError(
"The last two dimensions of Input(x) should be equal, but received x's shape = {}".
format(x_shape))
if paddle.in_dynamic_mode():
return _C_ops.eigvals(x)
helper = LayerHelper('eigvals', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(type='eigvals', inputs={'X': x}, outputs={'Out': out})
return out
def multi_dot(x, name=None):
"""
Multi_dot is an operator that calculates multiple matrix multiplications.
Supports inputs of float16(only GPU support), float32 and float64 dtypes. This function does not
support batched inputs.
    The input tensors in [x] must be 2-D, except that the first and the last ones may be 1-D.
    If the first tensor is a 1-D vector of shape (n, ), it is treated as a row vector
    of shape (1, n); similarly, if the last tensor is a 1-D vector of shape (n, ), it
    is treated as a column vector of shape (n, 1).
    If the first and last tensors are 2-D matrices, then the output is also a 2-D matrix,
    otherwise the output is a 1-D vector.
Multi_dot will select the lowest cost multiplication order for calculation. The
cost of multiplying two matrices with shapes (a, b) and (b, c) is a * b * c.
Given matrices A, B, C with shapes (20, 5), (5, 100), (100, 10) respectively,
we can calculate the cost of different multiplication orders as follows:
- Cost((AB)C) = 20x5x100 + 20x100x10 = 30000
- Cost(A(BC)) = 5x100x10 + 20x5x10 = 6000
    In this case, multiplying B and C first and then multiplying by A is 5 times cheaper
    than computing the products in sequential order.
Args:
        x ([Tensor]): The input tensors, given as a list of Tensors.
name(str|None): A name for this layer(optional). If set None, the layer
will be named automatically.
Returns:
Tensor: The output Tensor.
Examples:
.. code-block:: python
import paddle
import numpy as np
# A * B
A_data = np.random.random([3, 4]).astype(np.float32)
B_data = np.random.random([4, 5]).astype(np.float32)
A = paddle.to_tensor(A_data)
B = paddle.to_tensor(B_data)
out = paddle.linalg.multi_dot([A, B])
print(out.numpy().shape)
# [3, 5]
# A * B * C
A_data = np.random.random([10, 5]).astype(np.float32)
B_data = np.random.random([5, 8]).astype(np.float32)
C_data = np.random.random([8, 7]).astype(np.float32)
A = paddle.to_tensor(A_data)
B = paddle.to_tensor(B_data)
C = paddle.to_tensor(C_data)
out = paddle.linalg.multi_dot([A, B, C])
print(out.numpy().shape)
# [10, 7]
"""
if paddle.in_dynamic_mode():
return _C_ops.multi_dot(x)
check_type(x, 'x', (list, tuple), 'multi_dot')
for id, item in enumerate(x):
check_variable_and_dtype(item, 'x[' + str(id) + ']',
['float16', 'float32', 'float64'], 'multi_dot')
if item.dtype != x[0].dtype:
raise TypeError(
"All the Tensors in the input must have the same data type.")
helper = LayerHelper('multi_dot', **locals())
dtype = helper.input_dtype(input_param_name='x')
out = helper.create_variable_for_type_inference(dtype)
helper.append_op(type='multi_dot', inputs={"X": x}, outputs={"Out": out})
return out
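# Illustrative sketch of the cost comparison quoted in the docstring above (assumption: the
# cost of an (a, b) x (b, c) product is counted as a * b * c scalar multiplications; plain
# Python spells out the two orderings for shapes (20, 5), (5, 100), (100, 10)):
#
#     >>> cost_ab_then_c = 20 * 5 * 100 + 20 * 100 * 10    # (AB)C
#     >>> cost_a_then_bc = 5 * 100 * 10 + 20 * 5 * 10      # A(BC)
#     >>> cost_ab_then_c, cost_a_then_bc
#     (30000, 6000)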
def eigh(x, UPLO='L', name=None):
"""
Compute the eigenvalues and eigenvectors of a
complex Hermitian (conjugate symmetric) or a real symmetric matrix.
Args:
x (Tensor): A tensor with shape :math:`[*, N, N]` , The data type of the input Tensor x
should be one of float32, float64, complex64, complex128.
        UPLO(str, optional): 'L' represents the lower triangular matrix (the default);
            'U' represents the upper triangular matrix.
name(str, optional): The default value is None. Normally there is no need for user to set this
property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
out_value(Tensor): A Tensor with shape [*, N] and data type of float32 and float64. The eigenvalues of eigh op.
out_vector(Tensor): A Tensor with shape [*, N, N] and data type of float32,float64,complex64 and complex128. The eigenvectors of eigh op.
Examples:
.. code-block:: python
import numpy as np
import paddle
x_data = np.array([[1, -2j], [2j, 5]])
x = paddle.to_tensor(x_data)
out_value, out_vector = paddle.linalg.eigh(x, UPLO='L')
print(out_value)
#[0.17157288, 5.82842712]
print(out_vector)
#[(-0.9238795325112867+0j), (-0.3826834323650898+0j)],
#[ 0.3826834323650898j , -0.9238795325112867j ]]
"""
if paddle.in_dynamic_mode():
return _C_ops.eigh(x, 'UPLO', UPLO)
def __check_input(x, UPLO):
x_shape = list(x.shape)
if len(x.shape) < 2:
raise ValueError(
"Input(input) only support >=2 tensor, but received "
"length of Input(input) is %s." % len(x.shape))
if x_shape[-1] != x_shape[-2]:
raise ValueError(
"The input matrix must be batches of square matrices. But received x's dimention: {}".
format(x_shape))
if UPLO != 'L' and UPLO != 'U':
raise ValueError(
"UPLO must be L or U. But received UPLO is: {}".format(UPLO))
__check_input(x, UPLO)
helper = LayerHelper('eigh', **locals())
check_variable_and_dtype(
x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'eigh')
out_value = helper.create_variable_for_type_inference(dtype=x.dtype)
out_vector = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='eigh',
inputs={'X': x},
outputs={'Eigenvalues': out_value,
'Eigenvectors': out_vector},
attrs={'UPLO': UPLO})
return out_value, out_vector
def pinv(x, rcond=1e-15, hermitian=False, name=None):
r"""
    Calculate the pseudo inverse via SVD (singular value decomposition)
    of one matrix or batches of regular matrices.
.. math::
if hermitian == False:
x = u * s * vt (SVD)
out = v * 1/s * ut
else:
x = u * s * ut (eigh)
out = u * 1/s * u.conj().transpose(-2,-1)
    If x is a Hermitian or symmetric matrix, SVD will be replaced with eigh.
Args:
        x(Tensor): The input tensor. Its shape should be (*, m, n)
            where * is zero or more batch dimensions. m and n can be
            arbitrary positive numbers. The data type of x should be
            float32 or float64 or complex64 or complex128. When the data
            type is complex64 or complex128, hermitian should be set to
            True.
        rcond(Tensor, optional): the tolerance value below which a
            singular value is treated as zero. Default: 1e-15.
hermitian(bool, optional): indicates whether x is Hermitian
if complex or symmetric if real. Default: False.
name(str|None): A name for this layer(optional). If set None,
the layer will be named automatically.
Returns:
Tensor: The tensor with same data type with x. it represents
pseudo inverse of x. Its shape should be (*, n, m).
Examples:
.. code-block:: python
import paddle
x = paddle.arange(15).reshape((3, 5)).astype('float64')
input = paddle.to_tensor(x)
out = paddle.linalg.pinv(input)
print(input)
print(out)
# input:
# [[0. , 1. , 2. , 3. , 4. ],
# [5. , 6. , 7. , 8. , 9. ],
# [10., 11., 12., 13., 14.]]
# out:
# [[-0.22666667, -0.06666667, 0.09333333],
# [-0.12333333, -0.03333333, 0.05666667],
# [-0.02000000, 0.00000000, 0.02000000],
# [ 0.08333333, 0.03333333, -0.01666667],
# [ 0.18666667, 0.06666667, -0.05333333]]
# one can verify : x * out * x = x ;
            # or out * x * out = out ;
"""
if paddle.in_dynamic_mode():
if not hermitian:
# combine svd and matmul op
u, s, vt = _C_ops.svd(x, 'full_matrices', False)
max_singular_val = _C_ops.reduce_max(s, 'dim', [-1], 'keep_dim', True, \
'reduce_all', False)
rcond = paddle.to_tensor(rcond, dtype=x.dtype)
cutoff = rcond * max_singular_val
y = float('inf')
y = paddle.to_tensor(y, dtype=x.dtype)
condition = s > cutoff
cond_int = layers.cast(condition, s.dtype)
cond_not_int = layers.cast(layers.logical_not(condition), s.dtype)
out1 = layers.elementwise_mul(1 / s, cond_int)
out2 = layers.elementwise_mul(1 / y, cond_not_int)
singular = layers.elementwise_add(out1, out2)
st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2])
dims = list(range(len(vt.shape)))
perm = dims[:-2] + [dims[-1]] + [dims[-2]]
v, _ = _C_ops.transpose2(vt, 'axis', perm)
out_1 = v * st
out_2 = _C_ops.matmul_v2(out_1, u, 'trans_x', False, 'trans_y',
True)
return out_2
else:
# combine eigh and matmul op
s, u = _C_ops.eigh(x, 'UPLO', 'L')
s_abs = paddle.abs(s)
max_singular_val = _C_ops.reduce_max(s_abs, 'dim', [-1], 'keep_dim', True, \
'reduce_all', False)
rcond = paddle.to_tensor(rcond, dtype=s.dtype)
cutoff = rcond * max_singular_val
y = float('inf')
y = paddle.to_tensor(y, dtype=s.dtype)
condition = s_abs > cutoff
cond_int = layers.cast(condition, s.dtype)
cond_not_int = layers.cast(layers.logical_not(condition), s.dtype)
out1 = layers.elementwise_mul(1 / s, cond_int)
out2 = layers.elementwise_mul(1 / y, cond_not_int)
singular = layers.elementwise_add(out1, out2)
st, _ = _C_ops.unsqueeze2(singular, 'axes', [-2])
out_1 = u * st
u_conj = _C_ops.conj(u)
out_2 = _C_ops.matmul_v2(out_1, u_conj, 'trans_x', False, 'trans_y',
True)
return out_2
else:
if not hermitian:
helper = LayerHelper('pinv', **locals())
dtype = x.dtype
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pinv')
u = helper.create_variable_for_type_inference(dtype)
s = helper.create_variable_for_type_inference(dtype)
vt = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='svd',
inputs={'X': [x]},
outputs={'U': u,
'VH': vt,
'S': s},
attrs={'full_matrices': False}, )
max_singular_val = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='reduce_max',
inputs={'X': s},
outputs={'Out': max_singular_val},
attrs={'dim': [-1],
'keep_dim': True,
'reduce_all': False})
rcond = layers.fill_constant(shape=[1], value=rcond, dtype=dtype)
cutoff = rcond * max_singular_val
y = float('inf')
y = layers.fill_constant(shape=[1], value=y, dtype=dtype)
condition = s > cutoff
cond_int = layers.cast(condition, dtype)
cond_not_int = layers.cast(layers.logical_not(condition), dtype)
out1 = layers.elementwise_mul(1 / s, cond_int)
out2 = layers.elementwise_mul(1 / y, cond_not_int)
singular = layers.elementwise_add(out1, out2)
st = helper.create_variable_for_type_inference(dtype=dtype)
st_shape = helper.create_variable_for_type_inference(dtype=dtype)
helper.append_op(
type='unsqueeze2',
inputs={'X': singular},
attrs={'axes': [-2]},
outputs={'Out': st,
'XShape': st_shape})
dims = list(range(len(vt.shape)))
perm = dims[:-2] + [dims[-1]] + [dims[-2]]
v = helper.create_variable_for_type_inference(dtype)
v_shape = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='transpose2',
inputs={'X': [vt]},
outputs={'Out': [v],
'XShape': [v_shape]},
attrs={'axis': perm})
out_1 = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='elementwise_mul',
inputs={'X': v,
'Y': st},
outputs={'Out': out_1},
attrs={'axis': -1,
'use_mkldnn': False})
out_1 = helper.append_activation(out_1)
out_2 = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='matmul_v2',
inputs={'X': out_1,
'Y': u},
outputs={'Out': out_2},
attrs={'trans_x': False,
'trans_y': True}, )
return out_2
else:
helper = LayerHelper('pinv', **locals())
dtype = x.dtype
check_variable_and_dtype(
x, 'dtype', ['float32', 'float64', 'complex64',
'complex128'], 'pinv')
if dtype == paddle.complex128:
s_type = 'float64'
elif dtype == paddle.complex64:
s_type = 'float32'
else:
s_type = dtype
u = helper.create_variable_for_type_inference(dtype)
s = helper.create_variable_for_type_inference(s_type)
helper.append_op(
type='eigh',
inputs={'X': x},
outputs={'Eigenvalues': s,
'Eigenvectors': u},
attrs={'UPLO': 'L'})
s_abs = helper.create_variable_for_type_inference(s_type)
helper.append_op(
type='abs', inputs={'X': s}, outputs={'Out': s_abs})
max_singular_val = helper.create_variable_for_type_inference(s_type)
helper.append_op(
type='reduce_max',
inputs={'X': s_abs},
outputs={'Out': max_singular_val},
attrs={'dim': [-1],
'keep_dim': True,
'reduce_all': False})
rcond = layers.fill_constant(shape=[1], value=rcond, dtype=s_type)
cutoff = rcond * max_singular_val
y = float('inf')
y = layers.fill_constant(shape=[1], value=y, dtype=s_type)
condition = s_abs > cutoff
cond_int = layers.cast(condition, s_type)
cond_not_int = layers.cast(layers.logical_not(condition), s_type)
out1 = layers.elementwise_mul(1 / s, cond_int)
out2 = layers.elementwise_mul(1 / y, cond_not_int)
singular = layers.elementwise_add(out1, out2)
st = helper.create_variable_for_type_inference(dtype=s_type)
st_shape = helper.create_variable_for_type_inference(dtype=s_type)
helper.append_op(
type='unsqueeze2',
inputs={'X': singular},
attrs={'axes': [-2]},
outputs={'Out': st,
'XShape': st_shape})
out_1 = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='elementwise_mul',
inputs={'X': u,
'Y': st},
outputs={'Out': out_1},
attrs={'axis': -1,
'use_mkldnn': False})
out_1 = helper.append_activation(out_1)
u_conj = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='conj', inputs={'X': u}, outputs={'Out': [u_conj]})
out_2 = helper.create_variable_for_type_inference(dtype)
helper.append_op(
type='matmul_v2',
inputs={'X': out_1,
'Y': u_conj},
outputs={'Out': out_2},
attrs={'trans_x': False,
'trans_y': True}, )
return out_2
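# Illustrative sketch (assumptions: dynamic mode, a real input and hermitian=False, reusing the
# input from the docstring above). The Moore-Penrose identities mentioned there can be checked
# directly:
#
#     >>> import paddle
#     >>> x = paddle.arange(15).reshape((3, 5)).astype('float64')
#     >>> out = paddle.linalg.pinv(x)
#     >>> paddle.allclose(paddle.matmul(paddle.matmul(x, out), x), x)        # x @ out @ x == x
#     >>> paddle.allclose(paddle.matmul(paddle.matmul(out, x), out), out)    # out @ x @ out == out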
def solve(x, y, name=None):
r"""
Computes the solution of a square system of linear equations with a unique solution for input 'X' and 'Y'.
    Let :math:`X` be a square matrix or a batch of square matrices, :math:`Y` be
a vector/matrix or a batch of vectors/matrices, the equation should be:
.. math::
        Out = X^{-1} * Y
Specifically,
- This system of linear equations has one solution if and only if input 'X' is invertible.
Args:
x (Tensor): A square matrix or a batch of square matrices. Its shape should be `[*, M, M]`, where `*` is zero or
more batch dimensions. Its data type should be float32 or float64.
y (Tensor): A vector/matrix or a batch of vectors/matrices. Its shape should be `[*, M, K]`, where `*` is zero or
more batch dimensions. Its data type should be float32 or float64.
name(str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: The solution of a square system of linear equations with a unique solution for input 'x' and 'y'.
Its data type should be the same as that of `x`.
Examples:
.. code-block:: python
# a square system of linear equations:
            # 3*X0 + X1 = 9
# X0 + 2*X1 = 8
import paddle
import numpy as np
np_x = np.array([[3, 1],[1, 2]])
np_y = np.array([9, 8])
x = paddle.to_tensor(np_x, dtype="float64")
y = paddle.to_tensor(np_y, dtype="float64")
out = paddle.linalg.solve(x, y)
print(out)
# [2., 3.])
"""
if paddle.in_dynamic_mode():
return _C_ops.solve(x, y)
inputs = {"X": [x], "Y": [y]}
helper = LayerHelper("solve", **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'solve')
check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'solve')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="solve", inputs={"X": x,
"Y": y}, outputs={"Out": out})
return out
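# Illustrative sketch (assumption: dynamic mode, reusing the system from the docstring above).
# The returned solution can be checked by substituting it back into X @ Out = Y:
#
#     >>> import paddle
#     >>> x = paddle.to_tensor([[3.0, 1.0], [1.0, 2.0]], dtype='float64')
#     >>> y = paddle.to_tensor([9.0, 8.0], dtype='float64')
#     >>> out = paddle.linalg.solve(x, y)
#     >>> paddle.allclose(paddle.matmul(x, out), y)    # expected: True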
def triangular_solve(x,
y,
upper=True,
transpose=False,
unitriangular=False,
name=None):
r"""
Computes the solution of a system of equations with a triangular coefficient matrix `x` and
multiple right-hand sides `y` .
    Input `x` and `y` are 2D matrices or batches of 2D matrices. If the inputs are batches, the outputs
    are also batches.
Args:
x (Tensor): The input triangular coefficient matrix. Its shape should be `[*, M, M]`, where `*` is zero or
more batch dimensions. Its data type should be float32 or float64.
y (Tensor): Multiple right-hand sides of system of equations. Its shape should be `[*, M, K]`, where `*` is
zero or more batch dimensions. Its data type should be float32 or float64.
upper (bool, optional): Whether to solve the upper-triangular system of equations (default) or the lower-triangular
system of equations. Default: True.
transpose (bool, optional): whether `x` should be transposed before calculation. Default: False.
unitriangular (bool, optional): whether `x` is unit triangular. If True, the diagonal elements of `x` are assumed
to be 1 and not referenced from `x` . Default: False.
name(str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: The solution of the system of equations. Its data type should be the same as that of `x`.
Examples:
.. code-block:: python
# a square system of linear equations:
# x1 + x2 + x3 = 0
# 2*x2 + x3 = -9
# -x3 = 5
import paddle
import numpy as np
x = paddle.to_tensor([[1, 1, 1],
[0, 2, 1],
[0, 0,-1]], dtype="float64")
y = paddle.to_tensor([[0], [-9], [5]], dtype="float64")
out = paddle.linalg.triangular_solve(x, y, upper=True)
print(out)
# [7, -2, -5]
"""
if paddle.in_dynamic_mode():
return _C_ops.triangular_solve(x, y, 'upper', upper, 'transpose',
transpose, 'unitriangular',
unitriangular)
inputs = {"X": [x], "Y": [y]}
helper = LayerHelper("triangular_solve", **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'triangular_solve')
check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'triangular_solve')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='triangular_solve',
inputs={'X': x,
'Y': y},
outputs={'Out': out},
attrs={
'upper': upper,
'transpose': transpose,
'unitriangular': unitriangular
})
return out
def cholesky_solve(x, y, upper=False, name=None):
r"""
Solves a linear system of equations A @ X = B, given A's Cholesky factor matrix u and matrix B.
    Input `x` and `y` are 2D matrices or batches of 2D matrices. If the inputs are batches, the outputs
    are also batches.
Args:
        x (Tensor): Multiple right-hand sides of the system of equations. Its shape should be `[*, M, K]`, where `*` is
            zero or more batch dimensions. Its data type should be float32 or float64.
        y (Tensor): The input matrix which is the upper or lower triangular Cholesky factor of square matrix A. Its shape should be `[*, M, M]`, where `*` is zero or
            more batch dimensions. Its data type should be float32 or float64.
upper (bool, optional): whether to consider the Cholesky factor as a lower or upper triangular matrix. Default: False.
name(str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: The solution of the system of equations. Its data type is the same as that of `x`.
Examples:
.. code-block:: python
import paddle
u = paddle.to_tensor([[1, 1, 1],
[0, 2, 1],
[0, 0,-1]], dtype="float64")
b = paddle.to_tensor([[0], [-9], [5]], dtype="float64")
out = paddle.linalg.cholesky_solve(b, u, upper=True)
print(out)
# [-2.5, -7, 9.5]
"""
if paddle.in_dynamic_mode():
return _C_ops.cholesky_solve(x, y, 'upper', upper)
helper = LayerHelper("cholesky_solve", **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'cholesky_solve')
check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'cholesky_solve')
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='cholesky_solve',
inputs={'X': x,
'Y': y},
outputs={'Out': out},
attrs={'upper': upper})
return out
def eigvalsh(x, UPLO='L', name=None):
"""
Computes the eigenvalues of a
complex Hermitian (conjugate symmetric) or a real symmetric matrix.
Args:
        x (Tensor): A tensor with shape :math:`[*, M, M]`. The data type of the input Tensor x
should be one of float32, float64, complex64, complex128.
        UPLO(str, optional): Whether to compute the eigenvalues using the lower triangular part (‘L’, default) or the upper triangular part (‘U’) of the matrix.
name(str, optional): The default value is None. Normally there is no need for user to set this
property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tensor: The tensor eigenvalues in ascending order.
Examples:
.. code-block:: python
import numpy as np
import paddle
x_data = np.array([[1, -2j], [2j, 5]])
x = paddle.to_tensor(x_data)
out_value = paddle.eigvalsh(x, UPLO='L')
print(out_value)
#[0.17157288, 5.82842712]
"""
if paddle.in_dynamic_mode():
is_test = x.stop_gradient
values, _ = _C_ops.eigvalsh(x, 'UPLO', UPLO, 'is_test', is_test)
return values
def __check_input(x, UPLO):
x_shape = list(x.shape)
if len(x.shape) < 2:
raise ValueError(
"Input(input) only support >=2 tensor, but received "
"length of Input(input) is %s." % len(x.shape))
if x_shape[-1] != x_shape[-2]:
raise ValueError(
"The input matrix must be batches of square matrices. But received x's dimention: {}".
format(x_shape))
if UPLO != 'L' and UPLO != 'U':
raise ValueError(
"UPLO must be L or U. But received UPLO is: {}".format(UPLO))
__check_input(x, UPLO)
helper = LayerHelper('eigvalsh', **locals())
check_variable_and_dtype(x, 'dtype',
['float32', 'float64', 'complex64', 'complex128'],
'eigvalsh')
out_value = helper.create_variable_for_type_inference(dtype=x.dtype)
out_vector = helper.create_variable_for_type_inference(dtype=x.dtype)
is_test = x.stop_gradient
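    # The eigvalsh kernel also produces eigenvectors; only the eigenvalues are returned here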
helper.append_op(
type='eigvalsh',
inputs={'X': x},
outputs={'Eigenvalues': out_value,
'Eigenvectors': out_vector},
attrs={'UPLO': UPLO,
'is_test': is_test})
return out_value
def lstsq(x, y, rcond=None, driver=None, name=None):
"""
Computes a solution to
the least squares problem of a system of linear equations.
Args:
x (Tensor): A tensor with shape ``(*, M, N)`` , the data type of the input Tensor ``x``
should be one of float32, float64.
y (Tensor): A tensor with shape ``(*, M, K)`` , the data type of the input Tensor ``y``
should be one of float32, float64.
        rcond(float, optional): The default value is None. A floating point number used to determine
the effective rank of ``x``. If ``rcond`` is None, it will be set to max(M, N) times the
machine precision of x_dtype.
driver(str, optional): The default value is None. The name of LAPACK method to be used. For
            CPU inputs the valid values are ‘gels’, ‘gelsy’, ‘gelsd’, ‘gelss’. For CUDA input, the only
valid driver is ‘gels’. If ``driver`` is None, ‘gelsy’ is used for CPU inputs and ‘gels’
for CUDA inputs.
name(str, optional): The default value is None. Normally there is no need for user to set
this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Tuple: A tuple of 4 Tensors which is (``solution``, ``residuals``, ``rank``, ``singular_values``).
``solution`` is a tensor with shape ``(*, N, K)``, meaning the least squares solution. ``residuals``
is a tensor with shape ``(*, K)``, meaning the squared residuals of the solutions, which is computed
when M > N and every matrix in ``x`` is full-rank, otherwise return an empty tensor. ``rank`` is a tensor
with shape ``(*)``, meaning the ranks of the matrices in ``x``, which is computed when ``driver`` in
(‘gelsy’, ‘gelsd’, ‘gelss’), otherwise return an empty tensor. ``singular_values`` is a tensor with
shape ``(*, min(M, N))``, meaning singular values of the matrices in ``x``, which is computed when
``driver`` in (‘gelsd’, ‘gelss’), otherwise return an empty tensor.
Examples:
.. code-block:: python
import paddle
paddle.set_device("cpu")
x = paddle.to_tensor([[1, 3], [3, 2], [5, 6.]])
y = paddle.to_tensor([[3, 4, 6], [5, 3, 4], [1, 2, 1.]])
results = paddle.linalg.lstsq(x, y, driver="gelsd")
print(results[0])
# [[ 0.78350395, -0.22165027, -0.62371236],
# [-0.11340097, 0.78866047, 1.14948535]]
print(results[1])
# [19.81443405, 10.43814468, 30.56185532])
print(results[2])
# 2
print(results[3])
# [9.03455734, 1.54167950]
x = paddle.to_tensor([[10, 2, 3], [3, 10, 5], [5, 6, 12.]])
y = paddle.to_tensor([[4, 2, 9], [2, 0, 3], [2, 5, 3.]])
results = paddle.linalg.lstsq(x, y, driver="gels")
print(results[0])
# [[ 0.39386186, 0.10230173, 0.93606132],
# [ 0.10741687, -0.29028133, 0.11892585],
# [-0.05115091, 0.51918161, -0.19948854]]
print(results[1])
# []
"""
device = paddle.get_device()
if device == "cpu":
if driver not in (None, "gels", "gelss", "gelsd", "gelsy"):
raise ValueError(
"Only support valid driver is 'gels', 'gelss', 'gelsd', 'gelsy' or None for CPU inputs. But got {}".
format(driver))
driver = "gelsy" if driver is None else driver
elif "gpu" in device:
if driver not in (None, "gels"):
raise ValueError(
"Only support valid driver is 'gels' or None for CUDA inputs. But got {}".
format(driver))
driver = "gels" if driver is None else driver
else:
raise RuntimeError("Only support lstsq api for CPU or CUDA device.")
if x.dtype == y.dtype and x.dtype in (paddle.float32, paddle.float64):
pass
else:
raise ValueError(
"Only support x and y have the same dtype such as 'float32' and 'float64'."
)
if rcond is None:
if x.dtype == paddle.float32:
rcond = 1e-7 * max(x.shape[-2], x.shape[-1])
elif x.dtype == paddle.float64:
rcond = 1e-15 * max(x.shape[-2], x.shape[-1])
if paddle.in_dynamic_mode():
solution, rank, singular_values = _C_ops.lstsq(x, y, "rcond", rcond,
"driver", driver)
if x.shape[-2] > x.shape[-1]:
matmul_out = _varbase_creator(dtype=x.dtype)
_C_ops.matmul(x, solution, matmul_out, 'trans_x', False, 'trans_y',
False)
minus_out = _C_ops.elementwise_sub(matmul_out, y)
pow_out = _C_ops.pow(minus_out, 'factor', 2)
residuals = _C_ops.reduce_sum(pow_out, 'dim', [-2], 'keepdim',
False, 'reduce_all', False)
else:
residuals = paddle.empty(shape=[0], dtype=x.dtype)
if driver == "gels":
rank = paddle.empty(shape=[0], dtype=paddle.int32)
singular_values = paddle.empty(shape=[0], dtype=x.dtype)
elif driver == "gelsy":
singular_values = paddle.empty(shape=[0], dtype=x.dtype)
return solution, residuals, rank, singular_values
helper = LayerHelper('lstsq', **locals())
check_variable_and_dtype(
x, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq')
check_variable_and_dtype(
y, 'dtype', ['float32', 'float64', 'complex64', 'complex128'], 'lstsq')
solution = helper.create_variable_for_type_inference(dtype=x.dtype)
residuals = helper.create_variable_for_type_inference(dtype=x.dtype)
rank = helper.create_variable_for_type_inference(dtype=paddle.int32)
singular_values = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='lstsq',
inputs={'X': x,
'Y': y},
outputs={
'Solution': solution,
'Rank': rank,
'SingularValues': singular_values
},
attrs={'rcond': rcond,
'driver': driver})
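    # Recompute the residuals in the static graph: square (x @ solution - y) and sum over the row dimension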
matmul_out = helper.create_variable_for_type_inference(dtype=x.dtype)
minus_out = helper.create_variable_for_type_inference(dtype=x.dtype)
pow_out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='matmul_v2',
inputs={'X': x,
'Y': solution},
outputs={'Out': matmul_out},
attrs={
'trans_x': False,
'trans_y': False,
})
helper.append_op(
type='elementwise_sub',
inputs={'X': matmul_out,
'Y': y},
outputs={'Out': minus_out})
helper.append_op(
type='pow',
inputs={'X': minus_out},
outputs={'Out': pow_out},
attrs={'factor': 2})
helper.append_op(
type='reduce_sum',
inputs={'X': pow_out},
outputs={'Out': residuals},
attrs={'dim': [-2],
'keep_dim': False,
'reduce_all': False})
if driver == "gels":
rank = paddle.static.data(name='rank', shape=[0])
singular_values = paddle.static.data(name='singular_values', shape=[0])
elif driver == "gelsy":
singular_values = paddle.static.data(name='singular_values', shape=[0])
return solution, residuals, rank, singular_values
| 39.074185 | 182 | 0.553913 | ["Apache-2.0"] | DevilCarp/Paddle | python/paddle/tensor/linalg.py | 117,517 | Python |
import _plotly_utils.basevalidators
class TicksValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="ticks", parent_name="histogram2dcontour.colorbar", **kwargs
):
super(TicksValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
values=kwargs.pop("values", ["outside", "inside", ""]),
**kwargs
)
| 33.4 | 86 | 0.644711 | ["MIT"] | 1abner1/plotly.py | packages/python/plotly/plotly/validators/histogram2dcontour/colorbar/_ticks.py | 501 | Python |
# ---
# jupyter:
# jupytext:
# formats: ipynb,.pct.py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.3.3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Manipulating GPflow models
#
# One of the key ingredients in GPflow is the model class, which enables you to carefully control parameters. This notebook shows how some of these parameter control features work, and how to build your own model with GPflow. First we'll look at:
#
# - how to view models and parameters
# - how to set parameter values
# - how to constrain parameters (for example, variance > 0)
# - how to fix model parameters
# - how to apply priors to parameters
# - how to optimize models
#
# Then we'll show how to build a simple logistic regression model, demonstrating the ease of the parameter framework.
#
# GPy users should feel right at home, but there are some small differences.
#
# First, let's deal with the usual notebook boilerplate and make a simple GP regression model. See [Basic (Gaussian likelihood) GP regression model](../basics/regression.ipynb) for specifics of the model; we just want some parameters to play with.
# %%
import numpy as np
import gpflow
import tensorflow_probability as tfp
from gpflow.utilities import print_summary, set_trainable, to_default_float
# %% [markdown]
# We begin by creating a very simple GP regression model:
# %%
# generate toy data
np.random.seed(1)
X = np.random.rand(20, 1)
Y = np.sin(12 * X) + 0.66 * np.cos(25 * X) + np.random.randn(20, 1) * 0.01
m = gpflow.models.GPR((X, Y), kernel=gpflow.kernels.Matern32() + gpflow.kernels.Linear())
# %% [markdown]
# ## Viewing, getting, and setting parameters
# You can display the state of the model in a terminal by using `print_summary(m)`. You can change the display format using the `fmt` keyword argument, e.g. `'html'`. In a notebook, you can also use `fmt='notebook'` or set the default printing format as `notebook`:
# %%
print_summary(m, fmt="notebook")
# %%
gpflow.config.set_default_summary_fmt("notebook")
# %% [markdown]
# This model has four parameters. The kernel is made of the sum of two parts. The first (counting from zero) is a Matern32 kernel that has a variance parameter and a lengthscales parameter; the second is a linear kernel that has only a variance parameter. There is also a parameter that controls the variance of the noise, as part of the likelihood.
#
# All the model variables have been initialized at `1.0`. You can access individual parameters in the same way that you display the state of the model in a terminal; for example, to see all the parameters that are part of the likelihood, run:
# %%
print_summary(m.likelihood)
# %% [markdown]
# This gets more useful with more complex models!
# %% [markdown]
# To set the value of a parameter, just use `assign()`:
# %%
m.kernel.kernels[0].lengthscales.assign(0.5)
m.likelihood.variance.assign(0.01)
print_summary(m, fmt="notebook")
# %% [markdown]
# ## Constraints and trainable variables
#
# GPflow helpfully creates an unconstrained representation of all the variables. In the previous example, all the variables are constrained positively (see the **transform** column in the table); the unconstrained representation is given by $\alpha = \log(\exp(\theta)-1)$. The `trainable_parameters` property returns the constrained values:
# %%
m.trainable_parameters
# %% [markdown]
# Each parameter has an `unconstrained_variable` attribute that enables you to access the unconstrained value as a TensorFlow `Variable`.
# %%
p = m.kernel.kernels[0].lengthscales
p.unconstrained_variable
# %% [markdown]
# You can also check the unconstrained value as follows:
# %%
p.transform.inverse(p)
# %% [markdown]
# Constraints are handled by the Bijector classes from the `tensorflow_probability` package. You might prefer to use the constraint $\alpha = \log(\theta)$; this is easily done by replacing the parameter with one that has a different `transform` attribute (here we make sure to copy all other attributes across from the old parameter; this is not necessary when there is no `prior` and the `trainable` state is still the default of `True`):
# %%
old_parameter = m.kernel.kernels[0].lengthscales
new_parameter = gpflow.Parameter(
old_parameter,
trainable=old_parameter.trainable,
prior=old_parameter.prior,
name=old_parameter.name.split(":")[0], # tensorflow is weird and adds ':0' to the name
transform=tfp.bijectors.Exp(),
)
m.kernel.kernels[0].lengthscales = new_parameter
# %% [markdown]
# Though the lengthscale itself remains the same, the unconstrained lengthscale has changed:
# %%
p.transform.inverse(p)
# %% [markdown]
# You can also change the `transform` attribute in place:
# %%
m.kernel.kernels[0].variance.transform = tfp.bijectors.Exp()
# %%
print_summary(m, fmt="notebook")
# %% [markdown]
# ## Changing whether a parameter will be trained in optimization
#
# Another helpful feature is the ability to fix parameters. To do this, simply set the `trainable` attribute to `False`; this is shown in the **trainable** column of the representation, and the corresponding variable is removed from the free state.
# %%
set_trainable(m.kernel.kernels[1].variance, False)
print_summary(m)
# %%
m.trainable_parameters
# %% [markdown]
# To unfix a parameter, just set the `trainable` attribute to `True` again.
# %%
set_trainable(m.kernel.kernels[1].variance, True)
print_summary(m)
# %% [markdown]
# **NOTE:** If you want to recursively change the `trainable` status of an object that *contains* parameters, you **must** use the `set_trainable()` utility function.
#
# A module (e.g. a model, kernel, likelihood, ... instance) does not have a `trainable` attribute:
# %%
try:
m.kernel.trainable
except AttributeError:
print(f"{m.kernel.__class__.__name__} does not have a trainable attribute")
# %%
set_trainable(m.kernel, False)
print_summary(m)
# %% [markdown]
# ## Priors
#
# You can set priors in the same way as transforms and trainability, by using `tensorflow_probability` distribution objects. Let's set a Gamma prior on the variance of the Matern32 kernel.
# %%
k = gpflow.kernels.Matern32()
k.variance.prior = tfp.distributions.Gamma(to_default_float(2), to_default_float(3))
print_summary(k)
# %%
m.kernel.kernels[0].variance.prior = tfp.distributions.Gamma(
to_default_float(2), to_default_float(3)
)
print_summary(m)
# %% [markdown]
# ## Optimization
#
# To optimize your model, first create an instance of an optimizer (in this case, `gpflow.optimizers.Scipy`), which has optional arguments that are passed to `scipy.optimize.minimize` (we minimize the negative log likelihood). Then, call the `minimize` method of that optimizer, with your model as the optimization target. Variables that have priors are maximum a priori (MAP) estimated, that is, we add the log prior to the log likelihood, and otherwise use Maximum Likelihood.
# %%
def closure():
return -m.log_marginal_likelihood()
opt = gpflow.optimizers.Scipy()
opt.minimize(closure, variables=m.trainable_variables)
# %% [markdown]
# ## Building new models
#
# To build new models, you'll need to inherit from `gpflow.models.BayesianModel`. Parameters are instantiated with `gpflow.Parameter`. You might also be interested in `tf.Module`, which acts as a 'container' for `Parameter`s (for example, kernels are `tf.Module`s).
#
# In this very simple demo, we'll implement linear multiclass classification.
#
# There are two parameters: a weight matrix and a bias (offset). The key thing is to implement the `log_likelihood` method, which returns a TensorFlow scalar that represents the (log) likelihood. You can use parameter objects inside `log_likelihood`.
#
# %%
import tensorflow as tf
class LinearMulticlass(gpflow.models.BayesianModel):
def __init__(self, X, Y, name=None):
super().__init__(name=name) # always call the parent constructor
self.X = X.copy() # X is a NumPy array of inputs
self.Y = Y.copy() # Y is a 1-of-k (one-hot) representation of the labels
self.num_data, self.input_dim = X.shape
_, self.num_classes = Y.shape
# make some parameters
self.W = gpflow.Parameter(np.random.randn(self.input_dim, self.num_classes))
self.b = gpflow.Parameter(np.random.randn(self.num_classes))
# ^^ You must make the parameters attributes of the class for
# them to be picked up by the model. i.e. this won't work:
#
# W = gpflow.Param(... <-- must be self.W
def log_likelihood(self): # takes no arguments
p = tf.nn.softmax(
tf.matmul(self.X, self.W) + self.b
) # Param variables are used as tensorflow arrays.
return tf.reduce_sum(tf.math.log(p) * self.Y) # be sure to return a scalar
# %% [markdown]
# ...and that's it. Let's build a really simple demo to show that it works.
# %%
np.random.seed(123)
X = np.vstack(
[
np.random.randn(10, 2) + [2, 2],
np.random.randn(10, 2) + [-2, 2],
np.random.randn(10, 2) + [2, -2],
]
)
Y = np.repeat(np.eye(3), 10, 0)
from matplotlib import pyplot as plt
plt.style.use("ggplot")
# %matplotlib inline
import matplotlib
matplotlib.rcParams["figure.figsize"] = (12, 6)
_ = plt.scatter(X[:, 0], X[:, 1], 100, np.argmax(Y, 1), lw=2, cmap=plt.cm.viridis)
# %%
m = LinearMulticlass(X, Y)
m
# %%
def closure():
return -m.log_marginal_likelihood()
opt = gpflow.optimizers.Scipy()
opt.minimize(closure, variables=m.trainable_variables)
# %%
xx, yy = np.mgrid[-4:4:200j, -4:4:200j]
X_test = np.vstack([xx.flatten(), yy.flatten()]).T
f_test = np.dot(X_test, m.W.read_value()) + m.b.read_value()
p_test = np.exp(f_test)
p_test /= p_test.sum(1)[:, None]
# %%
plt.figure(figsize=(12, 6))
for i in range(3):
plt.contour(xx, yy, p_test[:, i].reshape(200, 200), [0.5], colors="k", linewidths=1)
_ = plt.scatter(X[:, 0], X[:, 1], 100, np.argmax(Y, 1), lw=2, cmap=plt.cm.viridis)
# %% [markdown]
# That concludes the new model example and this notebook. You might want to see for yourself that the `LinearMulticlass` model and its parameters have all the functionality demonstrated here. You could also add some priors and run Hamiltonian Monte Carlo using the HMC optimizer `gpflow.train.HMC` and its `sample` method. See [Markov Chain Monte Carlo (MCMC)](../advanced/mcmc.ipynb) for more information on running the sampler.
| 36.870629 | 478 | 0.715789 | ["Apache-2.0"] | christabella/GPflow | doc/source/notebooks/understanding/models.pct.py | 10,545 | Python |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_scale_io_persistent_volume_source import V1ScaleIOPersistentVolumeSource
class TestV1ScaleIOPersistentVolumeSource(unittest.TestCase):
""" V1ScaleIOPersistentVolumeSource unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1ScaleIOPersistentVolumeSource(self):
"""
Test V1ScaleIOPersistentVolumeSource
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1_scale_io_persistent_volume_source.V1ScaleIOPersistentVolumeSource()
pass
if __name__ == '__main__':
unittest.main()
| 24.177778 | 112 | 0.742647 | ["Apache-2.0"] | MiaoRachelYu/python | kubernetes/test/test_v1_scale_io_persistent_volume_source.py | 1,088 | Python |
#%%
import cv2
from pathlib import Path
#%%
root = Path(__file__).resolve().absolute().parent
jorge_path = root / "jorge"
jorge_dst_path = root / "jorge_100"
marissa_path = root / "marissa"
marissa_dst_path = root / "marissa_100"
#%%
for f in jorge_path.iterdir():
old_image = cv2.imread(str(f))
    image = cv2.resize(old_image, (100, 100))
    print(image)
| 22 | 50 | 0.663102 | ["Apache-2.0"] | JorgeGarciaIrazabal/ml-face-detector | scale_image.py | 374 | Python |
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
from openvino.tools.mo.ops.ReduceOps import ReduceProd, ReduceAnd, ReduceMax, ReduceMean, ReduceSum, ReduceL2, ReduceMin
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.graph.graph import Node
class AllFrontExtractor(FrontExtractorOp):
op = 'All'
enabled = True
@classmethod
def extract(cls, node: Node):
keep_dims = node.pb.attr['keep_dims'].b
ReduceAnd.update_node_stat(node, {'keep_dims': keep_dims})
return cls.enabled
class MaxFrontExtractor(FrontExtractorOp):
op = 'Max'
enabled = True
@classmethod
def extract(cls, node: Node):
ReduceMax.update_node_stat(node, {'keep_dims': node.pb.attr['keep_dims'].b})
return cls.enabled
class MinFrontExtractor(FrontExtractorOp):
op = 'Min'
enabled = True
@classmethod
def extract(cls, node: Node):
ReduceMin.update_node_stat(node, {'keep_dims': node.pb.attr['keep_dims'].b})
return cls.enabled
class MeanExtractor(FrontExtractorOp):
op = 'Mean'
enabled = True
@classmethod
def extract(cls, node: Node):
ReduceMean.update_node_stat(node, {'keep_dims': node.pb.attr["keep_dims"].b})
return cls.enabled
class ProdFrontExtractor(FrontExtractorOp):
op = 'Prod'
enabled = True
@classmethod
def extract(cls, node: Node):
ReduceProd.update_node_stat(node, {'keep_dims': node.pb.attr["keep_dims"].b})
return cls.enabled
class SumFrontExtractor(FrontExtractorOp):
op = 'Sum'
enabled = True
@classmethod
def extract(cls, node: Node):
ReduceSum.update_node_stat(node, {'keep_dims': node.pb.attr["keep_dims"].b})
return cls.enabled
class EuclideanNormFrontExtractor(FrontExtractorOp):
op = 'EuclideanNorm'
enabled = True
@classmethod
def extract(cls, node: Node):
ReduceL2.update_node_stat(node, {'keep_dims': node.pb.attr["keep_dims"].b})
return cls.enabled
| 26.474359 | 120 | 0.685714 | ["Apache-2.0"] | IndiraSalyahova/openvino | tools/mo/openvino/tools/mo/front/tf/reduce_ext.py | 2,065 | Python |
#!/usr/bin/python
"""
A Python program that creates a list. One of the elements of the list should be
a dictionary with at least two keys. Write this list out to a file using both
YAML and JSON formats. The YAML file should be in the expanded form.
"""
import yaml
import json
a = {
'name': 'router1',
'ip_addr': '1.2.3.4',
'serial_number': 'FTX000232',
'os_version': '12.4.15T',
'optional_attrib_1': 'foo',
}
b = {
'name': 'router2',
'ip_addr': '5.6.7.8',
'serial_number': 'FTX345632',
'os_version': '12.4.15T',
}
example_list = [a, b, "empty1", "empty2"]
print "Here is the list"
print "----------------"
print example_list
print "----------------\n"
print "Here is the list in YAML"
print "------------------------"
print yaml.dump(example_list, default_flow_style=False)
print "------------------------"
print "Here is the list in JSON"
print "------------------------"
print json.dumps(example_list)
print "------------------------"
with open("example_yaml.yml", "w") as f:
f.write(yaml.dump(example_list, default_flow_style=False))
with open("example_json.json", "w") as f:
f.write(json.dumps(example_list))
| 25.066667 | 79 | 0.617908 | ["Apache-2.0"] | melphick/pynet | week1/w1e6.py | 1,128 | Python |
import pandas as pd
import datetime
import matplotlib.pyplot as plt
import ast
from gensim.parsing.preprocessing import STOPWORDS
from nltk.corpus import stopwords
from collections import defaultdict
from nltk.stem import WordNetLemmatizer
import datetime
stop_words = stopwords.words('english')
lemmatizer = WordNetLemmatizer()
"""
Dates and dico
"""
df_sentiment = pd.read_excel('/Users/etiennelenaour/Desktop/Stage/vocab_sentiment.xlsx')
project_directory = '/Users/etiennelenaour/Desktop/Stage/'
l_month = ['January','February','March','April','May','June','July','August','September','October','November','December']
l_dates = list()
with open ('/Users/etiennelenaour/Desktop/Stage/csv_files/dates_fomc.csv', 'r') as doc :
head = doc.readline()
dates = doc.readlines()
dates_to_chg = []
for line in dates :
if line.split(',')[1] == ' Y' :
dates_to_chg += [line.split(';')[0]]
date = 0
m = 1
for month in l_month :
if month[:3] == line.split(';')[0].split('/')[0] :
date += 100 * m
m += 1
date += int(line.split(',')[0].split('/')[2])*10000
date += int(line.split(',')[0].split('/')[1])
l_dates.append(date)
l_dates_final = l_dates[101:]
date_to_append = [20120125, 20120425, 20120620, 20120801, 20120913, 20121024, 20121212, 20130130,
20130130, 20130320, 20130501, 20130619, 20130918, 20131030, 20131218, 20140129,
20140129, 20140430, 20140618, 20140917, 20141029, 20141217]
for date in date_to_append:
l_dates_final.append(date)
"""
cleaning functions
"""
def clean_dico_new_line(dico):
new_dico = defaultdict(lambda: list())
for keys, list_dico in dico.items():
new_liste = [string.rstrip("\\n").lower() for string in list_dico]
new_dico[keys] = new_liste
return new_dico
def remove_stop_word(dico):
new_dico = defaultdict(lambda: list())
for keys, list_dico in dico.items():
final_list = list()
for ele in list_dico:
if (ele not in STOPWORDS) and (ele not in stop_words):
final_list.append(ele)
new_dico[keys] = final_list
return new_dico
def remove_nan_from_list(liste):
new_liste = list()
for ele in liste:
if type(ele) == str:
new_liste.append(ele)
else:
pass
return new_liste
"""
Score functions
"""
negative_word_list = [ele.lower() for ele in df_sentiment.Negative.tolist()]
positive_word_list = [ele.lower() for ele in remove_nan_from_list(df_sentiment.Positive.tolist())]
def compute_positivity(dico):
""" This computes the positivity score of each statement.
    Takes a dictionary mapping each interlocutor's name to the list of tokens from their statements.
"""
dico_score = defaultdict(lambda: list())
for name, liste in dico.items():
neg_score = 0
pos_score = 0
for ele in liste:
if ele in negative_word_list:
neg_score += 1
elif ele in positive_word_list:
pos_score += 1
else:
pass
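        # Only score speakers with at least 30 negative and 30 positive word hits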
if neg_score < 30 or pos_score < 30:
pass
else:
score = (pos_score - neg_score) / (pos_score + neg_score)
dico_score[name] = score
return dico_score
def compute_mean_positivity(dico):
neg_score = 0
pos_score = 0
for liste in dico.values():
for ele in liste:
if ele in negative_word_list:
neg_score += 1
elif ele in positive_word_list:
pos_score += 1
else:
pass
score = (pos_score - neg_score) / (pos_score + neg_score)
return score
"""
Date function
"""
def from_int_dates(integ):
string = str(integ)
new_string = string[0]+ string[1] + string[2] + string[3] + "/" + string[4] + string[5] + "/" + string[6] + string[7]
return datetime.datetime.strptime(new_string, "%Y/%m/%d")
"""
plot positivity
"""
def plot_positivity_persons(date, dico_score, score_moyen):
list_score = list()
list_names = list()
for name, score in dico_score.items():
list_score.append(score)
list_names.append(name)
plt.bar(list_names, list_score, color='r')
plt.grid()
plt.xticks(rotation=90)
plt.text(-1, 0, date, horizontalalignment='left', verticalalignment='top', fontweight='bold')
plt.hlines(y=score_moyen, xmin = -1, xmax = len(list_names))
plt.ylabel("Score de positivité")
plt.title("Score de positivité des principaux speakers")
plt.tight_layout()
#plt.show()
plt.savefig(project_directory + 'image_score_posi/' + 'score_posi_' + str(date) + '.png')
plt.close()
return None
"""
Main
"""
for date in l_dates_final[-50:]:
with open (project_directory+'sentences_by_names/'+str(date)+'meeting.txt', 'r') as doc:
content = doc.readlines()[0]
dictionary = ast.literal_eval(content)
#Cleaning
dico_clean = remove_stop_word(clean_dico_new_line(dictionary))
plot_positivity_persons(date, compute_positivity(dico_clean), compute_mean_positivity(dico_clean))
| 20.703252 | 121 | 0.650697 | ["MIT"] | erialc-cal/NLP-FOMC | RA_project/code_python/image_score_posi.py | 5,095 | Python |
from nose import with_setup
from pygears import Intf, clear
from pygears.typing import Queue, Uint
from utils import svgen_check
@with_setup(clear)
@svgen_check(['sieve_0v2_7_8v10.sv'])
def test_uint():
iout = Intf(Uint[10])[:2, 7, 8:]
assert iout.dtype == Uint[5]
@with_setup(clear)
@svgen_check(['sieve_0v2_3_5v7.sv'])
def test_queue():
iout = Intf(Queue[Uint[2], 6])[:2, 3, 5:]
assert iout.dtype == Queue[Uint[2], 4]
| 21.047619 | 45 | 0.690045 | ["MIT"] | Risto97/pygears | tests/svgen/test_sieve.py | 442 | Python |
import socket
class UserException(Exception):
pass
def user_exception(s): raise UserException(s)
class Macro:
"""Represents a macro to be run"""
def __init__(self, code):
"""code: int - index of macro to run"""
self.code = code
class Command:
"""Represents a macro to be run"""
def __init__(self, command):
"""command: string - command to send"""
self.command = command
class HomeVisionController:
def __init__(
self,
ip_address,
port,
auth,
on_off_appliance_codes={},
actions={},
process_actions={},
var_queries={},
flag_queries={},
flag_return_values = {True: ["True", "On", "Yes", "Occupied", "Set", "1"], False: ["False", "Off", "No", "Vacant", "Clear", "0"]},
on_off_commands = None
):
"""
Args:
ip_address: string
port: int
auth: string
- key for authenticating with netio
on_off_appliance_codes: dict[string] => int - codes to be fed to 'on_off_commands' for each appliance
actions: dict[string] => Macro/Command/(_, _, ...) - named actions to be completed
process_actions: dict[string] => {"START": X, "STOP": X} where X is Macro/Command/(_, _, ...) - named processes to be started and stopped
var_queries: dict[string] => int - mapping of names to variable indexes
flag_queries: dict[string] => int - mapping of names to flag indexes
flag_return_values: {True: [string], False: [string]} - synonyms for true and false that are returned by netio 'read flag command'. (ignore if you haven't set them up)
on_off_commands: {"ON": (int) => Macro/Command/(_, _, ...), "OFF": (int) => Macro/Command} - how to handle on and off commands
"""
self.ip_address = ip_address
self.port = port
self.auth = auth
self.on_off_appliance_codes = on_off_appliance_codes
self.actions = actions
self.process_actions = process_actions
self.var_queries = var_queries
self.flag_queries = flag_queries
self.flag_return_values = flag_return_values
self.on_off_commands = on_off_commands
def on_off_command(self, details):
"""Send an on or off command to an appliance
Sends the specified command to the homevision through netio interface to control the specified appliance.
Args:
details: {"appliance": string, "state": string}
"""
if "appliance" not in details:
raise Exception("appliance not specified")
elif "state" not in details:
raise Exception("state not specified")
if details["appliance"] not in self.on_off_appliance_codes.keys():
raise Exception("appliance not supported. Must be one of: " + ",".join(self.on_off_appliance_codes.keys()))
appliance_code = self.on_off_appliance_codes[details["appliance"]]
if details['state'] == "ON":
self._switch_on(appliance_code)
elif details["state"] == "OFF":
self._switch_off(appliance_code)
else:
raise Exception("state not supported. Must be either \"ON\" or \"OFF\".")
def action_command(self, details):
"""Send an action command
Sends the specified command to the homevision through netio interface.
Args:
details: {"command": string}
"""
if "command" not in details:
raise Exception("Command not specified")
if details["command"] not in self.actions.keys():
raise Exception("Command not supported. Must be one of: " + ",".join(self.actions.keys()))
self._handle_action(self.actions[details["command"]])
def start_stop_command(self, details):
"""Starts or stops a process
Sends the specified command to the homevision through netio interface to control the specified process.
Args:
details: {"action": string, "process": string}
"""
if "action" not in details:
raise Exception("action not specified")
elif "process" not in details:
raise Exception("process not specified")
if details["process"] not in self.process_actions.keys():
raise Exception("process not supported. Must be one of: " + ",".join(self.process_actions.keys()))
if details['action'] == "START":
self._handle_action(self.process_actions[details["process"]]["START"])
elif details["action"] == "STOP":
self._handle_action(self.process_actions[details["process"]]["STOP"])
else:
raise Exception("action not supported. Must be either \"START\" or \"STOP\".")
def _handle_action(self, action):
def handle_single(a):
if type(a) == Macro:
self._run_macro(a.code)
elif type(a) == Command:
self._send_command(a.command)
elif type(a) == Exception:
raise a
else:
raise Exception("Internal Error: invalid action type. Should be Macro, Command or Exception")
if type(action) == tuple:
for a in action:
handle_single(a)
else:
handle_single(action)
def var_query(self, details):
"""Returns the answer to a query on variable
Returns the answer to a query on the specified variable using netio
Args:
details: {"query": string}
"""
if "query" not in details:
raise Exception("query not specified")
if details["query"] not in self.var_queries.keys():
raise Exception("query not supported. Must be one of: " + ",".join(self.var_queries.keys()))
code = self.var_queries[details["query"]]
if type(code) == int:
val = self._get_var(code)
elif type(code) == tuple:
val = [self._get_var(c) for c in code]
else:
raise Exception("Internal Exception: variable code is not valid")
return val
def flag_query(self, details):
"""Returns the answer to a query on flag
Returns the answer to a query on the specified variable using netio
Args:
details: {"query": string}
"""
if "query" not in details:
raise Exception("query not specified")
if details["query"] not in self.flag_queries.keys():
raise Exception("query not supported. Must be one of: " + ",".join(self.flag_queries.keys()))
val = self._get_flag(self.flag_queries[details["query"]])
return "yes" if val else "no"
def _switch_on(self, code):
if self.on_off_commands == None:
raise Exception("No On/Off command set")
self._handle_action(self.on_off_commands["ON"](code))
def _switch_off(self, code):
if self.on_off_commands == None:
raise Exception("No On/Off command set")
self._handle_action(self.on_off_commands["OFF"](code))
def _run_macro(self, code):
self._send_command(b'action macro run ' + bytes(str(code), encoding="ascii") + b'; __wait 100')
def _send_command(self, command):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((self.ip_address, self.port))
s.send(bytes("auth " + self.auth + "\n", encoding="ascii"))
s.send(command)
s.close()
def _get_var(self, id):
return int(self._run_read_command(b"get var state " + bytes(str(id), encoding="ascii")))
def _get_flag(self, id):
ret = self._run_read_command(b"get flag state " + bytes(str(id), encoding="ascii"))
if ret in self.flag_return_values[False]:
return False
elif ret in self.flag_return_values[True]:
return True
else:
raise Exception("Flag value not supported: " + ret)
def _run_read_command(self, command):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((self.ip_address, self.port))
s.send(bytes("auth " + self.auth + "\n", encoding="ascii"))
s.recv(10)
s.send(command)
s.send(b'\n')
response = s.recv(10).decode(encoding="ascii").rstrip()
s.close()
return response
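# Example usage (a minimal sketch; the address, port, auth key, appliance code and
# macro numbers below are made-up placeholders, not values from a real HomeVision setup):
#
#     controller = HomeVisionController(
#         ip_address="192.168.1.100",
#         port=11000,
#         auth="secret-key",
#         on_off_appliance_codes={"lamp": 1},
#         actions={"all off": Macro(5)},
#         on_off_commands={"ON": lambda code: Macro(code), "OFF": lambda code: Macro(code + 1)},
#     )
#     controller.on_off_command({"appliance": "lamp", "state": "ON"})
#     controller.action_command({"command": "all off"})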
| 34.230088 | 173 | 0.650724 | ["MIT"] | jackoson/homevision-netio-controller | homevision_netio_controller/controller.py | 7,736 | Python |
import os
import math
import sys
import torch
import numpy as np
from gym_collision_avoidance.envs.policies.InternalPolicy import InternalPolicy
from gym_collision_avoidance.envs import Config
from gym_collision_avoidance.envs.util import *
from gym_collision_avoidance.envs.policies import socialforce
import copy
import argparse
# Filter list by Boolean list
# Using itertools.compress
from itertools import compress
class SOCIALFORCEPolicy(InternalPolicy):
def __init__(self):
InternalPolicy.__init__(self, str="SOCIALFORCE")
self.dt = Config.DT
self.obs_seq_len = 8
self.is_init = False
def init(self,agents):
self.total_agents_num = [None]*self.n_agents
self.is_init = True
def find_next_action(self, obs, agents, i , full_agent_list = None, active_agent_mask = None):
agent_index = i
#check if elements before index contains non active agents, if yes, remove them, thus calculate the index shift
before_index = np.array(active_agent_mask)[:agent_index]
#see how many non active agents are before index, minus them calculate index shift
agent_index = agent_index - len( before_index[ before_index==False ] )
agents = list(compress(full_agent_list, active_agent_mask))
        observation_array = [] #observation array for social force, consists of N rows (one per agent), each row = vector (x, y, v_x, v_y, d_x, d_y, [tau])
if not self.is_init: #Execute one time per init (complete simulation iteration)
self.n_agents = len(agents)
self.init(agents)
            #initialize the observation vector because, at the start, social force seems to require a starting velocity for agents to move
for a in range(self.n_agents):
pos_difference = agents[a].goal_global_frame - agents[a].pos_global_frame
dist_next_waypoint = ( pos_difference / (np.linalg.norm( pos_difference ,ord=1)+0.0000001) ) * ( agents[a].pref_speed )
vel_next_waypoint = dist_next_waypoint
observation_array.append( [ agents[a].pos_global_frame[0], agents[a].pos_global_frame[1], vel_next_waypoint[0], vel_next_waypoint[1], agents[a].goal_global_frame[0], agents[a].goal_global_frame[1] ] )
else:
##added for dynamic num of agents compatibility
self.n_agents = len(agents)
self.init(agents)
for a in range(self.n_agents):
if agents[a].speed_global_frame<= agents[a].pref_speed/3:
pos_difference = agents[a].goal_global_frame - agents[a].pos_global_frame
dist_next_waypoint = ( pos_difference / (np.linalg.norm( pos_difference ,ord=1)+0.0000001) ) * ( agents[a].pref_speed )
vel_next_waypoint = dist_next_waypoint
observation_array.append( [ agents[a].pos_global_frame[0], agents[a].pos_global_frame[1], vel_next_waypoint[0], vel_next_waypoint[1], agents[a].goal_global_frame[0], agents[a].goal_global_frame[1] ] )
else:
observation_array.append( [ agents[a].pos_global_frame[0], agents[a].pos_global_frame[1], agents[a].vel_global_frame[0], agents[a].vel_global_frame[1], agents[a].goal_global_frame[0], agents[a].goal_global_frame[1] ] )
#print("goal")
#print(agents[agent_index].goal_global_frame)
initial_state = np.array( observation_array )
s=None
#s = socialforce.Simulator(initial_state, delta_t=0.1)
s = socialforce.Simulator(initial_state, delta_t=0.1)
states = np.stack([s.step().state.copy() for _ in range(1)]) #step one time only
#print("states")
#print(states)
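        # Row for this agent after one simulator step: columns 0-1 give the new position, columns 2-3 the new velocity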
next_waypoint_x = states[:, agent_index, 0][0]
next_waypoint_y = states[:, agent_index, 1][0]
next_waypoint_vel_x = states[:, agent_index, 2][0]
next_waypoint_vel_y = states[:, agent_index, 3][0]
self.next_waypoint = np.array( [ next_waypoint_x , next_waypoint_y ] )
goal_direction = self.next_waypoint - agents[agent_index].pos_global_frame
self.dist_to_goal = math.sqrt(goal_direction[0]**2 + goal_direction[1]**2)
if self.dist_to_goal > 1e-8:
ref_prll = goal_direction / agents[agent_index].dist_to_goal
else:
ref_prll = goal_direction
ref_orth = np.array([-ref_prll[1], ref_prll[0]]) # rotate by 90 deg
ref_prll_angle_global_frame = np.arctan2(ref_prll[1],
ref_prll[0])
heading_ego_frame = wrap( agents[agent_index].heading_global_frame -
ref_prll_angle_global_frame)
vel_global_frame = np.array( [ next_waypoint_vel_x , next_waypoint_vel_y ] )#( self.next_waypoint - agents[agent_index].pos_global_frame) / agents[agent_index].dt_nominal
speed_global_frame = np.linalg.norm(vel_global_frame)
if speed_global_frame > agents[agent_index].pref_speed: speed_global_frame = agents[agent_index].pref_speed
#But in reality, the format of action is [speed, heading_delta]
action = np.array([speed_global_frame, -heading_ego_frame])
#print("action")
#print(action)
return action
#agents[agent_index].set_state( next_waypoint_x , next_waypoint_y, next_waypoint_vel_x, next_waypoint_vel_y )
#resultant_speed_global_frame = agents[agent_index].speed_global_frame
#resultant_delta_heading_global_frame = agents[agent_index].delta_heading_global_frame
###########################################################POSITION APPROACH##########################################################################
## print("position")
## print(agents[agent_index].pos_global_frame)
## next_waypoint_x = states[:, agent_index, 0][0]
## next_waypoint_y = states[:, agent_index, 1][0]
##
## next_waypoint = np.array( [ next_waypoint_x, next_waypoint_y ] )
## print("next_waypoint")
## print(next_waypoint)
##
##
##
## pos_difference = next_waypoint - agents[agent_index].pos_global_frame
## dist_next_waypoint = ( pos_difference / (np.linalg.norm( pos_difference ,ord=1)+0.0000001) ) * ( agents[agent_index].pref_speed * 0.1)
##
## position_x = agents[agent_index].pos_global_frame[0] + dist_next_waypoint[0]
## position_y = agents[agent_index].pos_global_frame[1] + dist_next_waypoint[1]
## agents[agent_index].set_state( position_x , position_y )
##
## resultant_speed_global_frame = agents[agent_index].speed_global_frame
## resultant_delta_heading_global_frame = agents[agent_index].delta_heading_global_frame
#Although documentation and code comment mentioned that action is consisted with [heading delta, speed]
#But in reality, the format of action is [speed, heading_delta]
###########################################################################################################################################
| 44.193939 | 242 | 0.63124 | ["MIT"] | cmubig/Social-Navigation-Simulator | gym_collision_avoidance/envs/policies/SOCIALFORCEPolicy.py | 7,292 | Python |
import json
import asynctest
from asynctest import TestCase as AsyncTestCase
from asynctest import mock as async_mock
from aries_cloudagent.config.injection_context import InjectionContext
from aries_cloudagent.messaging.request_context import RequestContext
from .....admin.request_context import AdminRequestContext
from .. import routes as test_module
from ..manager import MediationManager
from ..models.mediation_record import MediationRecord
class TestCoordinateMediationRoutes(AsyncTestCase):
def setUp(self):
self.session_inject = {}
self.context = AdminRequestContext.test_context(self.session_inject)
self.outbound_message_router = async_mock.CoroutineMock()
self.request_dict = {
"context": self.context,
"outbound_message_router": self.outbound_message_router,
}
self.request = async_mock.MagicMock(
match_info={
"mediation_id": "test-mediation-id",
"conn_id": "test-conn-id",
},
query={},
json=async_mock.CoroutineMock(return_value={}),
__getitem__=lambda _, k: self.request_dict[k],
)
serialized = {
"mediation_id": "fake_id",
"state": "granted",
"role": "server",
"connection_id": "c3dd00cf-f6a2-4ddf-93d8-49ae74bdacef",
"mediator_terms": [],
"recipient_terms": [],
"routing_keys": ["EwUKjVLboiLSuoWSEtDvrgrd41EUxG5bLecQrkHB63Up"],
"endpoint": "http://192.168.1.13:3005",
"created_at": "1234567890",
}
self.mock_record = async_mock.MagicMock(
**serialized,
serialize=async_mock.MagicMock(return_value=serialized),
save=async_mock.CoroutineMock()
)
def test_mediation_sort_key(self):
assert (
test_module.mediation_sort_key(
{"state": MediationRecord.STATE_DENIED, "created_at": ""}
)
== "2"
)
assert (
test_module.mediation_sort_key(
{"state": MediationRecord.STATE_REQUEST, "created_at": ""}
)
== "1"
)
assert (
test_module.mediation_sort_key(
{"state": MediationRecord.STATE_GRANTED, "created_at": ""}
)
== "0"
)
async def test_list_mediation_requests(self):
self.request.query = {}
self.context.session = async_mock.CoroutineMock()
with async_mock.patch.object(
test_module.MediationRecord,
"query",
async_mock.CoroutineMock(return_value=[self.mock_record]),
) as mock_query, async_mock.patch.object(
test_module.web, "json_response"
) as json_response:
await test_module.list_mediation_requests(self.request)
json_response.assert_called_once_with(
[self.mock_record.serialize.return_value]
)
mock_query.assert_called_once_with(self.context.session.return_value, {})
async def test_list_mediation_requests_filters(self):
self.request.query = {
"state": MediationRecord.STATE_GRANTED,
"conn_id": "test-conn-id",
}
self.context.session = async_mock.CoroutineMock()
with async_mock.patch.object(
test_module.MediationRecord,
"query",
async_mock.CoroutineMock(return_value=[self.mock_record]),
) as mock_query, async_mock.patch.object(
test_module.web, "json_response"
) as json_response:
await test_module.list_mediation_requests(self.request)
json_response.assert_called_once_with(
[self.mock_record.serialize.return_value]
)
mock_query.assert_called_once_with(
self.context.session.return_value,
{
"connection_id": "test-conn-id",
"state": MediationRecord.STATE_GRANTED,
},
)
async def test_list_mediation_requests_x(self):
with async_mock.patch.object(
test_module,
"MediationRecord",
async_mock.MagicMock(
query=async_mock.CoroutineMock(side_effect=test_module.StorageError())
),
) as mock_med_rec:
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.list_mediation_requests(self.request)
async def test_list_mediation_requests_no_records(self):
with async_mock.patch.object(
test_module,
"MediationRecord",
async_mock.MagicMock(query=async_mock.CoroutineMock(return_value=[])),
) as mock_med_rec, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
await test_module.list_mediation_requests(self.request)
mock_response.assert_called_once_with([])
async def test_retrieve_mediation_request(self):
with async_mock.patch.object(
test_module.MediationRecord, "retrieve_by_id", async_mock.CoroutineMock()
) as mock_mediation_record_retrieve, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
mock_mediation_record_retrieve.return_value = self.mock_record
await test_module.retrieve_mediation_request(self.request)
mock_response.assert_called_once_with(
self.mock_record.serialize.return_value
)
mock_mediation_record_retrieve.assert_called()
async def test_retrieve_mediation_request_x_not_found(self):
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageNotFoundError()),
) as mock_mediation_record_retrieve, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response, self.assertRaises(
test_module.web.HTTPNotFound
):
await test_module.retrieve_mediation_request(self.request)
async def test_retrieve_mediation_request_x_storage_error(self):
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageError()),
) as mock_mediation_record_retrieve, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response, self.assertRaises(
test_module.web.HTTPBadRequest
):
await test_module.retrieve_mediation_request(self.request)
async def test_delete_mediation_request(self):
with async_mock.patch.object(
test_module.MediationRecord, "retrieve_by_id", async_mock.CoroutineMock()
) as mock_mediation_record_retrieve, async_mock.patch.object(
self.mock_record, "delete_record", async_mock.CoroutineMock()
) as mock_delete_record, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
mock_mediation_record_retrieve.return_value = self.mock_record
await test_module.delete_mediation_request(self.request)
mock_response.assert_called_once_with(
self.mock_record.serialize.return_value
)
mock_mediation_record_retrieve.assert_called()
mock_delete_record.assert_called()
async def test_delete_mediation_request_x_not_found(self):
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageNotFoundError()),
) as mock_mediation_record_retrieve, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response, self.assertRaises(
test_module.web.HTTPNotFound
):
await test_module.delete_mediation_request(self.request)
async def test_delete_mediation_request_x_storage_error(self):
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageError()),
) as mock_mediation_record_retrieve, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response, self.assertRaises(
test_module.web.HTTPBadRequest
):
await test_module.delete_mediation_request(self.request)
async def test_request_mediation(self):
body = {
"mediator_terms": ["meaningless string because terms are not used"],
"recipient_terms": ["meaningless string because terms are not a 'thing'"],
}
self.request.json.return_value = body
with async_mock.patch.object(
test_module, "MediationManager", autospec=True
) as mock_med_mgr, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response, async_mock.patch.object(
test_module.MediationRecord,
"exists_for_connection_id",
async_mock.CoroutineMock(return_value=False),
) as mock_mediation_record_exists, async_mock.patch.object(
test_module.ConnRecord, "retrieve_by_id", async_mock.CoroutineMock()
) as mock_conn_rec_retrieve_by_id:
mock_med_mgr.return_value.prepare_request = async_mock.CoroutineMock(
return_value=(
self.mock_record,
async_mock.MagicMock( # mediation request
serialize=async_mock.MagicMock(return_value={"a": "value"}),
),
)
)
await test_module.request_mediation(self.request)
mock_response.assert_called_once_with(
self.mock_record.serialize.return_value, status=201
)
self.outbound_message_router.assert_called()
async def test_request_mediation_x_conn_not_ready(self):
body = {
"mediator_terms": ["meaningless string because terms are not used"],
"recipient_terms": ["meaningless string because terms are not a 'thing'"],
}
self.request.json.return_value = body
with async_mock.patch.object(
test_module.ConnRecord,
"retrieve_by_id",
async_mock.CoroutineMock(return_value=async_mock.MagicMock(is_ready=False)),
) as mock_conn_rec_retrieve_by_id, self.assertRaises(
test_module.web.HTTPBadRequest
) as exc:
await test_module.request_mediation(self.request)
assert "request connection is not ready" in exc.msg
async def test_request_mediation_x_already_exists(self):
body = {
"mediator_terms": ["meaningless string because terms are not used"],
"recipient_terms": ["meaningless string because terms are not a 'thing'"],
}
self.request.json.return_value = body
with async_mock.patch.object(
test_module.ConnRecord, "retrieve_by_id", async_mock.CoroutineMock()
) as mock_conn_rec_retrieve_by_id, async_mock.patch.object(
test_module.MediationRecord,
"exists_for_connection_id",
async_mock.CoroutineMock(return_value=True),
) as mock_exists_for_conn, self.assertRaises(
test_module.web.HTTPBadRequest
) as exc:
await test_module.request_mediation(self.request)
assert "already exists for connection" in exc.msg
async def test_request_mediation_x_conn_not_found(self):
body = {
"mediator_terms": ["meaningless string because terms are not used"],
"recipient_terms": ["meaningless string because terms are not a 'thing'"],
}
self.request.json.return_value = body
with async_mock.patch.object(
test_module.ConnRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageNotFoundError()),
) as mock_conn_rec_retrieve_by_id, self.assertRaises(
test_module.web.HTTPNotFound
):
await test_module.request_mediation(self.request)
async def test_request_mediation_x_storage_error(self):
body = {
"mediator_terms": ["meaningless string because terms are not used"],
"recipient_terms": ["meaningless string because terms are not a 'thing'"],
}
self.request.json.return_value = body
with async_mock.patch.object(
test_module.ConnRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageError()),
) as mock_conn_rec_retrieve_by_id, self.assertRaises(
test_module.web.HTTPBadRequest
):
await test_module.request_mediation(self.request)
async def test_mediation_request_grant_role_server(self):
self.mock_record.role = MediationRecord.ROLE_SERVER
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(return_value=self.mock_record),
) as mock_mediation_record_retrieve, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
await test_module.mediation_request_grant(self.request)
mock_response.assert_called_once_with(
self.mock_record.serialize.return_value, status=201
)
self.outbound_message_router.assert_called()
async def test_mediation_request_grant_role_client_x(self):
self.mock_record.role = MediationRecord.ROLE_CLIENT
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(return_value=self.mock_record),
), self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.mediation_request_grant(self.request)
async def test_mediation_request_grant_x_rec_not_found(self):
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageNotFoundError()),
), self.assertRaises(test_module.web.HTTPNotFound):
await test_module.mediation_request_grant(self.request)
async def test_mediation_request_grant_x_storage_error(self):
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageError()),
), self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.mediation_request_grant(self.request)
async def test_mediation_request_deny_role_server(self):
self.mock_record.role = MediationRecord.ROLE_SERVER
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(return_value=self.mock_record),
) as mock_mediation_record_retrieve, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
await test_module.mediation_request_deny(self.request)
mock_response.assert_called_once_with(
self.mock_record.serialize.return_value, status=201
)
self.outbound_message_router.assert_called()
async def test_mediation_request_deny_role_client_x(self):
self.mock_record.role = MediationRecord.ROLE_CLIENT
with async_mock.patch.object(
test_module.MediationRecord, "retrieve_by_id", async_mock.CoroutineMock()
) as mock_mediation_record_retrieve, async_mock.patch.object(
test_module.web, "json_response"
):
mock_mediation_record_retrieve.return_value = async_mock.MagicMock(
role=MediationRecord.ROLE_CLIENT
)
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.mediation_request_deny(self.request)
async def test_mediation_request_deny_x_rec_not_found(self):
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageNotFoundError()),
), self.assertRaises(test_module.web.HTTPNotFound):
await test_module.mediation_request_deny(self.request)
async def test_mediation_request_deny_x_storage_error(self):
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageError()),
), self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.mediation_request_deny(self.request)
async def test_get_keylist(self):
self.request.query["role"] = MediationRecord.ROLE_SERVER
self.request.query["conn_id"] = "test-id"
query_results = [
async_mock.MagicMock(
serialize=async_mock.MagicMock(
return_value={"serialized": "route record"}
)
)
]
with async_mock.patch.object(
test_module.RouteRecord,
"query",
async_mock.CoroutineMock(return_value=query_results),
) as mock_query, async_mock.patch.object(
self.context, "session", async_mock.CoroutineMock()
) as mock_session, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
await test_module.get_keylist(self.request)
mock_response.assert_called_once_with(
[{"serialized": "route record"}], status=200
)
mock_query.assert_called_once_with(
mock_session.return_value,
{"role": MediationRecord.ROLE_SERVER, "connection_id": "test-id"},
)
async def test_get_keylist_no_matching_records(self):
with async_mock.patch.object(
test_module.RouteRecord,
"query",
async_mock.CoroutineMock(return_value=[]),
) as mock_query, async_mock.patch.object(
self.context, "session", async_mock.CoroutineMock()
) as mock_session, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
await test_module.get_keylist(self.request)
mock_query.assert_called_once_with(mock_session.return_value, {})
mock_response.assert_called_once_with([], status=200)
async def test_get_keylist_storage_error(self):
with async_mock.patch.object(
test_module.RouteRecord,
"query",
async_mock.CoroutineMock(side_effect=test_module.StorageError),
) as mock_query, self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.get_keylist(self.request)
async def test_send_keylist_update(self):
body = {
"updates": [
{"recipient_key": "test-key0", "action": "add"},
{"recipient_key": "test-key1", "action": "remove"},
]
}
self.request.json.return_value = body
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(
return_value=async_mock.MagicMock(
state=MediationRecord.STATE_GRANTED, connection_id="test-conn-id"
)
),
) as mock_retrieve_by_id, async_mock.patch.object(
test_module.web,
"json_response",
async_mock.MagicMock(
side_effect=lambda *args, **kwargs: [*args, *kwargs.values()]
),
) as mock_response:
results, status = await test_module.send_keylist_update(self.request)
assert results["updates"] == body["updates"]
assert status == 201
async def test_send_keylist_update_bad_action(self):
self.request.json.return_value = {
"updates": [
{"recipient_key": "test-key0", "action": "wrong"},
]
}
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.send_keylist_update(self.request)
async def test_send_keylist_update_bad_mediation_state(self):
self.request.json.return_value = {
"updates": [
{"recipient_key": "test-key0", "action": "add"},
]
}
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(
return_value=async_mock.MagicMock(
state=MediationRecord.STATE_DENIED, connection_id="test-conn-id"
)
),
) as mock_retrieve_by_id, self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.send_keylist_update(self.request)
async def test_send_keylist_update_bad_updates(self):
self.request.json.return_value = {"updates": []}
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.send_keylist_update(self.request)
async def test_send_keylist_update_x_no_mediation_rec(self):
self.request.json.return_value = {
"updates": [
{"recipient_key": "test-key0", "action": "add"},
]
}
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageNotFoundError()),
), self.assertRaises(test_module.web.HTTPNotFound):
await test_module.send_keylist_update(self.request)
async def test_send_keylist_update_x_storage_error(self):
self.request.json.return_value = {
"updates": [
{"recipient_key": "test-key0", "action": "add"},
]
}
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageError()),
), self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.send_keylist_update(self.request)
@async_mock.patch.object(test_module, "MediationManager", autospec=True)
async def test_send_keylist_query(self, mock_manager):
self.request.json.return_value = {"filter": {"test": "filter"}}
self.request.query = {"paginate_limit": 10, "paginate_offset": 20}
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(return_value=self.mock_record),
) as mock_retrieve_by_id, async_mock.patch.object(
mock_manager.return_value,
"prepare_keylist_query",
async_mock.CoroutineMock(),
) as mock_prepare_keylist_query, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
await test_module.send_keylist_query(self.request)
mock_prepare_keylist_query.assert_called_once_with(
filter_={"test": "filter"}, paginate_limit=10, paginate_offset=20
)
self.outbound_message_router.assert_called()
mock_response.assert_called_once_with(
mock_prepare_keylist_query.return_value.serialize.return_value,
status=201,
)
async def test_send_keylist_query_x_no_mediation_record(self):
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageNotFoundError()),
) as mock_retrieve_by_id, self.assertRaises(test_module.web.HTTPNotFound):
await test_module.send_keylist_query(self.request)
async def test_send_keylist_query_x_storage_error(self):
with async_mock.patch.object(
test_module.MediationRecord,
"retrieve_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageError()),
) as mock_retrieve_by_id, self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.send_keylist_query(self.request)
async def test_get_default_mediator(self):
self.request.query = {}
self.context.session = async_mock.CoroutineMock()
with async_mock.patch.object(
test_module.web, "json_response"
) as json_response, async_mock.patch.object(
test_module.MediationManager,
"get_default_mediator",
async_mock.CoroutineMock(return_value=self.mock_record),
) as mock_mgr_get_default_record:
await test_module.get_default_mediator(self.request)
json_response.assert_called_once_with(
self.mock_record.serialize.return_value,
status=200,
)
async def test_get_empty_default_mediator(self):
self.request.query = {}
self.context.session = async_mock.CoroutineMock()
with async_mock.patch.object(
test_module.web, "json_response"
) as json_response, async_mock.patch.object(
test_module.MediationManager,
"get_default_mediator",
async_mock.CoroutineMock(return_value=None),
) as mock_mgr_get_default_record:
await test_module.get_default_mediator(self.request)
json_response.assert_called_once_with(
{},
status=200,
)
async def test_get_default_mediator_storage_error(self):
self.request.query = {}
self.context.session = async_mock.CoroutineMock()
with async_mock.patch.object(
test_module.web, "json_response"
) as json_response, async_mock.patch.object(
test_module.MediationManager,
"get_default_mediator",
async_mock.CoroutineMock(side_effect=test_module.StorageNotFoundError()),
) as mock_mgr_get_default_record:
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.get_default_mediator(self.request)
async def test_set_default_mediator(self):
self.request.match_info = {
"mediation_id": "fake_id",
}
self.request.query = {}
self.context.session = async_mock.CoroutineMock()
with async_mock.patch.object(
test_module.MediationManager,
"get_default_mediator",
async_mock.CoroutineMock(return_value=self.mock_record),
) as mock_mgr_get_default_record, async_mock.patch.object(
test_module.MediationManager,
"set_default_mediator_by_id",
async_mock.CoroutineMock(),
) as mock_mgr_set_default_record_by_id, async_mock.patch.object(
test_module.web, "json_response"
) as json_response:
await test_module.set_default_mediator(self.request)
json_response.assert_called_once_with(
self.mock_record.serialize.return_value,
status=201,
)
async def test_set_default_mediator_storage_error(self):
self.request.match_info = {
"mediation_id": "bad_id",
}
self.request.query = {}
self.context.session = async_mock.CoroutineMock()
with async_mock.patch.object(
test_module.MediationManager,
"get_default_mediator",
async_mock.CoroutineMock(side_effect=test_module.StorageError()),
) as mock_mgr_get_default_record, async_mock.patch.object(
test_module.MediationManager,
"set_default_mediator_by_id",
async_mock.CoroutineMock(side_effect=test_module.StorageError()),
) as mock_mgr_set_default_record_by_id, async_mock.patch.object(
test_module.web, "json_response"
) as json_response:
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.set_default_mediator(self.request)
async def test_clear_default_mediator(self):
self.request.query = {}
self.context.session = async_mock.CoroutineMock()
with async_mock.patch.object(
test_module.MediationManager,
"get_default_mediator",
async_mock.CoroutineMock(return_value=self.mock_record),
) as mock_mgr_get_default_record, async_mock.patch.object(
test_module.MediationManager,
"clear_default_mediator",
async_mock.CoroutineMock(),
) as mock_mgr_clear_default_record_by_id, async_mock.patch.object(
test_module.web, "json_response"
) as json_response:
await test_module.clear_default_mediator(self.request)
json_response.assert_called_once_with(
self.mock_record.serialize.return_value,
status=201,
)
async def test_clear_default_mediator_storage_error(self):
self.request.query = {}
self.context.session = async_mock.CoroutineMock()
with async_mock.patch.object(
test_module.MediationManager,
"get_default_mediator",
async_mock.CoroutineMock(side_effect=test_module.StorageError()),
) as mock_mgr_get_default_record, async_mock.patch.object(
test_module.MediationManager,
"clear_default_mediator",
async_mock.CoroutineMock(),
) as mock_mgr_clear_default_record_by_id, async_mock.patch.object(
test_module.web, "json_response"
) as json_response:
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.clear_default_mediator(self.request)
async def test_register(self):
mock_app = async_mock.MagicMock()
mock_app.add_routes = async_mock.MagicMock()
await test_module.register(mock_app)
mock_app.add_routes.assert_called_once()
async def test_post_process_routes(self):
mock_app = async_mock.MagicMock(_state={"swagger_dict": {}})
test_module.post_process_routes(mock_app)
assert "tags" in mock_app._state["swagger_dict"]
| 43.385593 | 88 | 0.648078 | ["Apache-2.0"] | TimoGlastra/aries-cloudagent-python | aries_cloudagent/protocols/coordinate_mediation/v1_0/tests/test_routes.py | 30,717 | Python |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyDataladWebapp(PythonPackage):
"""DataLad extension for exposing commands via a web request API"""
homepage = "https://github.com/datalad/datalad-webapp"
pypi = "datalad_webapp/datalad_webapp-0.3.tar.gz"
version('0.3', sha256='7bbb2ce58a7e0e6d1a7a2f33d7e50fe7e73cd764380e70fdc2d9f651c3d0e312')
depends_on('py-setuptools', type='build')
depends_on('py-datalad@0.12.5:', type=('build', 'run'))
depends_on('py-flask@1.0:', type=('build', 'run'))
depends_on('py-flask-restful', type=('build', 'run'))
depends_on('py-pytest-cov', type=('build', 'run'))
| 37.181818 | 93 | 0.709046 | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | Bambi/spack | var/spack/repos/builtin/packages/py-datalad-webapp/package.py | 818 | Python |
"""Simple quantum computations simulation."""
import numpy as np
def I():
"""Identity operator."""
return np.identity(2)
def X():
"""X-rotation, negation operator."""
return np.identity(2)[..., ::-1]
def H():
"""Adamara operator, superposition."""
return np.array([[1, 1], [1, -1]]) / np.sqrt(2)
def SWAP():
"""Swap 2 qubits"""
m = np.identity(4)
m[[1, 2]] = m[[2, 1]]
return m
def CX():
"""Controlled negation."""
m = np.identity(4)
m[[3, 2]] = m[[2, 3]]
return m
def apply(v, *gates):
m = gates[0]
gates = gates[1:]
for gate in gates:
m = np.kron(gate, m)
return m.dot(v)
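# Note on ordering: apply() builds kron(gates[-1], ..., kron(gates[1], gates[0])),
# so the first gate argument acts on the least-significant qubit of the state
# index and later arguments act on increasingly significant qubits.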
def observe(v):
v2 = np.absolute(v) ** 2
c = np.random.choice(v.size, 1, p=v2)
return c[0]
# Usage example
# create 3 qubits in state 000, array size 2 ^ n
a = np.array([1, 0, 0, 0, 0, 0, 0, 0])
# transform the 2nd qubit into a superposition of 0 and 1
a = apply(a, I(), H(), I())
# entangle the 1st and 2nd qubit
a = apply(a, CX(), I())
# swap the 2nd and 3rd qubit
a = apply(a, I(), SWAP())
# observe the state
observe(a)
| 17.966102 | 57 | 0.580189 | ["MIT"] | duboviy/misc | quantum.py | 1,060 | Python |
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
import json
import re
from typing import Union
from jsonschema import RefResolver
from pydantic import BaseModel, Field
from .streams import DEFAULT_START_DATE, ReportGranularity
class OauthCredSpec(BaseModel):
class Config:
title = "OAuth2.0"
auth_type: str = Field(default="oauth2.0", const=True, order=0)
app_id: str = Field(title="App ID", description="The App ID applied by the developer.", airbyte_secret=True)
secret: str = Field(title="Secret", description="The private key of the developer's application.", airbyte_secret=True)
access_token: str = Field(title="Access Token", description="Long-term Authorized Access Token.", airbyte_secret=True)
class SandboxEnvSpec(BaseModel):
class Config:
title = "Sandbox Access Token"
auth_type: str = Field(default="sandbox_access_token", const=True, order=0)
# it is string because UI has the bug https://github.com/airbytehq/airbyte/issues/6875
advertiser_id: str = Field(
title="Advertiser ID", description="The Advertiser ID which generated for the developer's Sandbox application."
)
access_token: str = Field(title="Access Token", description="The Long-term Authorized Access Token.", airbyte_secret=True)
class ProductionEnvSpec(BaseModel):
class Config:
title = "Production Access Token"
auth_type: str = Field(default="prod_access_token", const=True, order=0)
# it is float because UI has the bug https://github.com/airbytehq/airbyte/issues/6875
app_id: str = Field(description="The App ID applied by the developer.", title="App ID")
secret: str = Field(title="Secret", description="The private key of the developer application.", airbyte_secret=True)
access_token: str = Field(title="Access Token", description="The Long-term Authorized Access Token.", airbyte_secret=True)
class SourceTiktokMarketingSpec(BaseModel):
class Config:
title = "TikTok Marketing Source Spec"
start_date: str = Field(
title="Start Date",
default=DEFAULT_START_DATE,
pattern="^[0-9]{4}-[0-9]{2}-[0-9]{2}$",
description="The Start Date in format: YYYY-MM-DD. Any data before this date will not be replicated. "
"If this parameter is not set, all data will be replicated.",
order=0,
)
report_granularity: str = Field(
title="Report Granularity",
description="Which time granularity should be grouped by; for LIFETIME there will be no grouping. "
"This option is used for reports' streams only.",
default=ReportGranularity.default().value,
enum=[g.value for g in ReportGranularity],
order=1,
)
credentials: Union[OauthCredSpec, ProductionEnvSpec, SandboxEnvSpec] = Field(
title="Authorization Method", order=3, default={}, type="object"
)
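    # pydantic renders Union fields as JSON-schema "anyOf"; the helpers below
    # rewrite that to "oneOf" (so the auth options are presented as mutually
    # exclusive) and inline $ref definitions so the final spec is self-contained.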
@classmethod
def change_format_to_oneOf(cls, schema: dict) -> dict:
new_schema = {}
for key, value in schema.items():
if isinstance(value, dict):
value = cls.change_format_to_oneOf(value)
if key == "anyOf":
new_schema["oneOf"] = value
else:
new_schema[key] = value
return new_schema
@staticmethod
def resolve_refs(schema: dict) -> dict:
json_schema_ref_resolver = RefResolver.from_schema(schema)
str_schema = json.dumps(schema)
for ref_block in re.findall(r'{"\$ref": "#\/definitions\/.+?(?="})"}', str_schema):
ref = json.loads(ref_block)["$ref"]
str_schema = str_schema.replace(ref_block, json.dumps(json_schema_ref_resolver.resolve(ref)[1]))
pyschema = json.loads(str_schema)
del pyschema["definitions"]
return pyschema
@classmethod
def schema(cls) -> dict:
"""we're overriding the schema classmethod to enable some post-processing"""
schema = super().schema()
schema = cls.change_format_to_oneOf(schema)
return cls.resolve_refs(schema)
class CompleteOauthOutputSpecification(BaseModel):
access_token: str = Field(path_in_connector_config=["credentials", "access_token"])
class CompleteOauthServerInputSpecification(BaseModel):
app_id: str = Field()
secret: str = Field()
class CompleteOauthServerOutputSpecification(BaseModel):
app_id: str = Field(path_in_connector_config=["credentials", "app_id"])
secret: str = Field(path_in_connector_config=["credentials", "secret"])
| 36.328 | 126 | 0.68355 | ["MIT"] | 99designs/airbyte | airbyte-integrations/connectors/source-tiktok-marketing/source_tiktok_marketing/spec.py | 4,541 | Python |
from TASSELpy.java.lang.Number import Number, metaNumber
from TASSELpy.java.lang.Comparable import Comparable
from TASSELpy.utils.DocInherit import DocInherit
from TASSELpy.utils.Overloading import javaOverload,javaConstructorOverload
from TASSELpy.javaObj import javaObj
from TASSELpy.utils.helper import make_sig
from abc import ABCMeta
import numpy as np
java_imports = {'Long':'java/lang/Long',
'String':'java/lang/String'}
class metaLong:
__metaclass__ = ABCMeta
@classmethod
def __subclasshook__(cls, C):
if C == np.int64:
return True
elif C == np.uint64:
return True
elif issubclass(C,Long):
return True
elif issubclass(C,long):
return True
else:
return False
## Wrapper class for java.lang.Long
class Long(Comparable, Number):
"""
Wrapper class for java.lang.Long
"""
_java_name = java_imports['Long']
@javaConstructorOverload(java_imports['Long'],
(make_sig(['long'],'void'),(metaLong,)),
(make_sig([java_imports['String']],'void'),(str,)))
def __init__(self, *args, **kwargs):
"""
Instantiates a new Long
Signatures:
Long(long value)
Long(String s)
Arguments:
Long(long value)
value -- The long to wrap in the object
Long (String s)
s -- The string representing the long
"""
super(Long, self).__init__(*args, generic=(Long,), **kwargs)
@DocInherit
@javaOverload("compareTo",
(make_sig([java_imports['Long']],'int'),(metaLong,),None))
def compareTo(self, *args):
pass
###################################
## Numeric magic methods
###################################
def __pos__(self):
return Long(+self.toPrimative())
def __neg__(self):
return Long(-self.toPrimative())
def __abs__(self):
        return Long(abs(self.toPrimative()))
def __invert__(self):
return Long(~self.toPrimative())
def __floor__(self):
return Long(np.floor(self.toPrimative()))
def __ceil__(self):
return Long(np.ceil(self.toPrimative()))
###################################
## Arithmetic magic methods
###################################
def __add__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() + other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() + other))
def __radd__(self, other):
return self.__add__(other)
def __iadd__(self, other):
return self.__add__(other)
def __sub__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() - other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() - other))
def __rsub__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(other.toPrimative()-self.toPrimative()))
else:
return Long(np.int64(other-self.toPrimative()))
def __isub__(self, other):
return self.__sub__(other)
def __mul__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() * other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() * other))
def __rmul__(self, other):
return self.__mul__(other)
def __imul__(self, other):
return self.__mul__(other)
def __floordiv__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() // other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() // other))
def __rfloordiv__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(other.toPrimative() // self.toPrimative()))
else:
return Long(np.int64(other // self.toPrimative()))
def __ifloordiv__(self, other):
return self.__floordiv__(other)
def __div__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() / other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() / other))
def __rdiv__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(other.toPrimative() / self.toPrimative()))
else:
return Long(np.int64(other / self.toPrimative()))
def __idiv__(self, other):
return self.__div__(other)
def __mod__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() % other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() % other))
def __rmod__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(other.toPrimative() % self.toPrimative()))
else:
return Long(np.int64(other % self.toPrimative()))
def __imod__(self, other):
return self.__mod__(other)
def __pow__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() ** other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() ** other))
def __rpow__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(other.toPrimative() ** self.toPrimative()))
else:
return Long(np.int64(other ** self.toPrimative()))
def __ipow__(self, other):
return self.__pow__(other)
def __lshift__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() << other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() << other))
def __rlshift__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(other.toPrimative() << self.toPrimative()))
else:
return Long(np.int64(other << self.toPrimative()))
def __ilshift__(self, other):
return self.__lshift__(other)
def __rshift__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() >> other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() >> other))
    def __rrshift__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(other.toPrimative() >> self.toPrimative()))
else:
return Long(np.int64(other >> self.toPrimative()))
def __irshift__(self, other):
return self.__rshift__(other)
def __and__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() & other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() & other))
def __rand__(self, other):
return self.__and__(other)
def __iand__(self, other):
return self.__and__(other)
def __or__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() | other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() | other))
def __ror__(self, other):
return self.__or__(other)
def __ior__(self, other):
return self.__or__(other)
def __xor__(self, other):
if isinstance(other, metaNumber):
if isinstance(other, Number):
return Long(np.int64(self.toPrimative() ^ other.toPrimative()))
else:
return Long(np.int64(self.toPrimative() ^ other))
def __rxor__(self, other):
return self.__xor__(other)
def __ixor__(self, other):
return self.__xor__(other)
def __repr__(self):
return "Long(%d)" % self.longValue()
@DocInherit
def toPrimative(self):
return self.longValue()
| 38.705628 | 80 | 0.581926 | ["BSD-3-Clause"] | er432/TASSELpy | TASSELpy/java/lang/Long.py | 8,941 | Python |
from __future__ import unicode_literals
import dataent
from dataent.model.rename_doc import rename_doc
def execute():
if dataent.db.table_exists("Email Alert Recipient") and not dataent.db.table_exists("Notification Recipient"):
rename_doc('DocType', 'Email Alert Recipient', 'Notification Recipient')
dataent.reload_doc('email', 'doctype', 'notification_recipient')
if dataent.db.table_exists("Email Alert") and not dataent.db.table_exists("Notification"):
rename_doc('DocType', 'Email Alert', 'Notification')
dataent.reload_doc('email', 'doctype', 'notification')
| 44.461538 | 111 | 0.780277 | ["MIT"] | dataent/dataent | dataent/patches/v11_0/rename_email_alert_to_notification.py | 578 | Python |
"""
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 3.2.9.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-t#@y8e6d21m2+#l#m00+pi&d0eyqa2a6v09hle&!6di(d4th*0'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'profiles_api.UserProfile'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| 25.870229 | 91 | 0.705223 | ["MIT"] | Mukul-agrawal/profiles-rest-api | profiles_project/settings.py | 3,389 | Python |
import os
import re
import struct
import glob
import numpy as np
import frame_utils
import skimage
import skimage.io
import torch
from torch.utils.data import Dataset
class KLens(Dataset):
#def __init__(self,raft_path="/data2/opticalflow/rnd/opticalflow/RAFT/out_klens_raft_chairs", root_path="/data2/opticalflow/KLENS/images/",root_path2="/data2/opticalflow/KLENS/pins/",filenumberlist=["0030","1106","1113","1132","1134","1167","1173"],split="train",ref="",meas=""):
def __init__(self,raft_path="/data2/opticalflow/algo_comp/flownet2/out/", root_path="/data2/opticalflow/KLENS/images/",root_path2="/data2/opticalflow/KLENS/pins/",filenumberlist=["0030","1106","1113","1132","1134","1167","1173"],split="train",ref="",meas=""):
super(KLens, self).__init__()
self.split = split
raftflowpaths = glob.glob(os.path.join(raft_path,"*.flo"))
file_list = {}
file_list['train'] = []
file_list['valid'] = []
file_list['test'] = []
file_list['train+valid'] = []
for filenum in filenumberlist:
for raftflowpath in raftflowpaths:
#print(raftflowpath)
if "KLE_"+filenum in raftflowpath:
file_list['train'].append([os.path.join(root_path,"KLE_"+filenum+".jpg3.png"),os.path.join(root_path,"KLE_"+filenum+".jpg5.png"),raftflowpath])
file_list["train"].extend([[os.path.join(root_path,"KLE_0309_exp_sub5.jpg"),os.path.join(root_path,"KLE_0309_exp_sub6.jpg")],[os.path.join(root_path,"KLE_0730_sub5.jpg"),os.path.join(root_path,"KLE_0730_sub6.jpg")],[os.path.join(root_path,"KLE_0747_sub5.jpg"),os.path.join(root_path,"KLE_0747_sub6.jpg")],[os.path.join(root_path,"KLE_9797clean_sub5.jpg"),os.path.join(root_path,"KLE_9797clean_sub6.jpg")],[os.path.join(root_path,"KLE_9803clean_sub5.jpg"),os.path.join(root_path,"KLE_9803clean_sub6.jpg")],[os.path.join(root_path,"NKM_0063_sub5.jpg"),os.path.join(root_path,"NKM_0063_sub6.jpg")],[os.path.join(root_path,"NKM_0109_sub5.jpg"),os.path.join(root_path,"NKM_0109_sub6.jpg")],[os.path.join(root_path,"scene_1_sub5.jpg"),os.path.join(root_path,"scene_1_sub6.jpg")]])
file_list["valid"].extend([[os.path.join(root_path,"KLE_0309_exp_sub5.jpg"),os.path.join(root_path,"KLE_0309_exp_sub6.jpg")],[os.path.join(root_path,"KLE_0730_sub5.jpg"),os.path.join(root_path,"KLE_0730_sub6.jpg")],[os.path.join(root_path,"KLE_0747_sub5.jpg"),os.path.join(root_path,"KLE_0747_sub6.jpg")],[os.path.join(root_path,"KLE_9797clean_sub5.jpg"),os.path.join(root_path,"KLE_9797clean_sub6.jpg")],[os.path.join(root_path,"KLE_9803clean_sub5.jpg"),os.path.join(root_path,"KLE_9803clean_sub6.jpg")],[os.path.join(root_path,"NKM_0063_sub5.jpg"),os.path.join(root_path,"NKM_0063_sub6.jpg")],[os.path.join(root_path,"NKM_0109_sub5.jpg"),os.path.join(root_path,"NKM_0109_sub6.jpg")],[os.path.join(root_path,"scene_1_sub5.jpg"),os.path.join(root_path,"scene_1_sub6.jpg")]])
file_list["test"].extend([[os.path.join(root_path,"KLE_0309_exp_sub5.jpg"),os.path.join(root_path,"KLE_0309_exp_sub6.jpg")],[os.path.join(root_path,"KLE_0730_sub5.jpg"),os.path.join(root_path,"KLE_0730_sub6.jpg")],[os.path.join(root_path,"KLE_0747_sub5.jpg"),os.path.join(root_path,"KLE_0747_sub6.jpg")],[os.path.join(root_path,"KLE_9797clean_sub5.jpg"),os.path.join(root_path,"KLE_9797clean_sub6.jpg")],[os.path.join(root_path,"KLE_9803clean_sub5.jpg"),os.path.join(root_path,"KLE_9803clean_sub6.jpg")],[os.path.join(root_path,"NKM_0063_sub5.jpg"),os.path.join(root_path,"NKM_0063_sub6.jpg")],[os.path.join(root_path,"NKM_0109_sub5.jpg"),os.path.join(root_path,"NKM_0109_sub6.jpg")],[os.path.join(root_path,"scene_1_sub5.jpg"),os.path.join(root_path,"scene_1_sub6.jpg")]])
file_list["train+valid"].extend([[os.path.join(root_path,"KLE_0309_exp_sub5.jpg"),os.path.join(root_path,"KLE_0309_exp_sub6.jpg")],[os.path.join(root_path,"KLE_0730_sub5.jpg"),os.path.join(root_path,"KLE_0730_sub6.jpg")],[os.path.join(root_path,"KLE_0747_sub5.jpg"),os.path.join(root_path,"KLE_0747_sub6.jpg")],[os.path.join(root_path,"KLE_9797clean_sub5.jpg"),os.path.join(root_path,"KLE_9797clean_sub6.jpg")],[os.path.join(root_path,"KLE_9803clean_sub5.jpg"),os.path.join(root_path,"KLE_9803clean_sub6.jpg")],[os.path.join(root_path,"NKM_0063_sub5.jpg"),os.path.join(root_path,"NKM_0063_sub6.jpg")],[os.path.join(root_path,"NKM_0109_sub5.jpg"),os.path.join(root_path,"NKM_0109_sub6.jpg")],[os.path.join(root_path,"scene_1_sub5.jpg"),os.path.join(root_path,"scene_1_sub6.jpg")]])
#file_list["train"].extend([[os.path.join(root_path2,"9-AIT_pins_2.jpg"),os.path.join(root_path2,"9-AIT_pins_3.jpg")],[os.path.join(root_path2,"10-Hela_2.jpg"),os.path.join(root_path2,"10-Hela_3.jpg")],[os.path.join(root_path2,"11-Hela_1_2.jpg"),os.path.join(root_path2,"11-Hela_1_3.jpg")],])
#file_list["train"].extend([[os.path.join(root_path2,"9-AIT_pins_2.jpg"),os.path.join(root_path2,"9-AIT_pins_0.jpg")],[os.path.join(root_path2,"10-Hela_2.jpg"),os.path.join(root_path2,"10-Hela_0.jpg")],[os.path.join(root_path2,"11-Hela_1_2.jpg"),os.path.join(root_path2,"11-Hela_1_0.jpg")],])
#file_list["train"].extend([[os.path.join(root_path2,"9-AIT_pins_2.jpg"),os.path.join(root_path2,"9-AIT_pins_1.jpg")],[os.path.join(root_path2,"10-Hela_2.jpg"),os.path.join(root_path2,"10-Hela_1.jpg")],[os.path.join(root_path2,"11-Hela_1_2.jpg"),os.path.join(root_path2,"11-Hela_1_1.jpg")],])
#file_list["train"].extend([[os.path.join(root_path2,"9-AIT_pins_2.jpg"),os.path.join(root_path2,"9-AIT_pins_4.jpg")],[os.path.join(root_path2,"10-Hela_2.jpg"),os.path.join(root_path2,"10-Hela_4.jpg")],[os.path.join(root_path2,"11-Hela_1_2.jpg"),os.path.join(root_path2,"11-Hela_1_4.jpg")],])
self.dataset = file_list
def __len__(self):
return len(self.dataset[self.split])
def __getitem__(self, idx):
try:
im0_path, im1_path, raftflow_path = self.dataset[self.split][idx]
raftflow = frame_utils.readFlow(raftflow_path)
except:
im0_path, im1_path = self.dataset[self.split][idx]
raftflow = np.array([])
img0 = skimage.io.imread(im0_path)
img1 = skimage.io.imread(im1_path)
img0 = torch.tensor(img0/255.).float()
img1 = torch.tensor(img1/255.).float()
return img0, img1,np.array([]),np.array([]), [im0_path , im1_path],raftflow
class Flo:
def __init__(self, w, h):
self.__floec1__ = float(202021.25)
self.__floec2__ = int(w)
self.__floec3__ = int(h)
self.__floheader__ = struct.pack('fii', self.__floec1__, self.__floec2__, self.__floec3__)
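        # The .flo magic number 202021.25 packs to the ASCII bytes b'PIEH' on
        # little-endian machines, which is what the sanity check below relies on.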
self.__floheaderlen__ = len(self.__floheader__)
self.__flow__ = w
self.__floh__ = h
self.__floshape__ = [self.__floh__, self.__flow__, 2]
if self.__floheader__[:4] != b'PIEH':
raise Exception('Expect machine to be LE.')
def load(self, file):
with open(file, 'rb') as fp:
if fp.read(self.__floheaderlen__) != self.__floheader__:
raise Exception('Bad flow header: ' + file)
result = np.ndarray(shape=self.__floshape__,
dtype=np.float32,
buffer=fp.read(),
order='C')
return result
def save(self, arr, fname):
with open(fname, 'wb') as fp:
fp.write(self.__floheader__)
fp.write(arr.astype(np.float32).tobytes())
| 85.136364 | 788 | 0.695275 | ["MIT"] | klens-codes/MaskFlownet-Pytorch | data_loaders/KLens.py | 7,492 | Python |
# Telegram settings
TG_CLI = '/opt/tg/bin/telegram-cli'
TG_PUBKEY = '/opt/tg/tg-server.pub'
RECEPIENT = '@your-tg-recepient'
# Reddit App settings
REDDIT_APP_KEY = 'c...w'
REDDIT_APP_SECRET = 'T...c'
REDDIT_USER_AGENT = ('Damaris Bot, v0.1. Read only bot to read posts from'
'/r/cats')
# Sample Captions
CAPTIONS = [
"some",
"random",
"strings",
]
| 21.277778 | 74 | 0.631854 | ["MIT"] | avinassh/damaris | sample_settings.py | 383 | Python |
import os
import pickle
import numpy as np
from tqdm import tqdm
from deeptutor.envs.DashEnv import *
from deeptutor.envs.EFCEnv import EFCEnv
from deeptutor.envs.HRLEnv import *
from deeptutor.infrastructure.utils import *
from deeptutor.tutors.LeitnerTutor import LeitnerTutor
from deeptutor.tutors.RandTutor import RandTutor
from deeptutor.tutors.PPOTutor import PPOTutor
from deeptutor.tutors.SACTutor import SACTutor
from deeptutor.tutors.DQNTutor import DQNTutor
from deeptutor.tutors.MLPTRPOTutor import MLPTRPOTutor
from deeptutor.tutors.GRUTRPOTutor import GRUTRPOTutor
from deeptutor.tutors.SuperMnemoTutor import SuperMnemoTutor
from deeptutor.tutors.ThresholdTutor import ThresholdTutor
def load_rewards(tutor_name, data_dir):
filename = os.path.join(data_dir, f"{tutor_name}_reward_logs.pkl")
if not os.path.exists(filename):
return {}
with open(filename, "rb") as f:
return pickle.load(f)["rewards"]
def main():
override = True # override existing data
data_dir = os.path.join(os.getcwd(), "data")
n_steps = 200
n_items = 30
const_delay = 5
discount = 0.99
n_reps = 10
n_eps = 100
env_kwargs = {
"n_items": n_items,
"n_steps": n_steps,
"discount": discount,
"sample_delay": sample_const_delay(const_delay),
}
reward_funcs = [
"likelihood",
"log_likelihood"
]
envs = [
("EFC", EFCEnv),
("HLR", HLREnv),
("DASH", DASHEnv)
]
tutor_builders = [
# ("Random", RandTutor),
# ("Leitner", LeitnerTutor),
# ("SuperMnemo", SuperMnemoTutor),
# ("Threshold", ThresholdTutor),
# ("MLPTRPO", MLPTRPOTutor),
# ("GRUTRPO", GRUTRPOTutor),
# ("PPO", PPOTutor),
("DQN", DQNTutor),
]
rl_tutors = [MLPTRPOTutor, GRUTRPOTutor, PPOTutor, DQNTutor]
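    # Tutors listed in rl_tutors are trained on the gym-style wrapped student
    # environment (make_rl_student_env) with a per-repetition seed; the other,
    # heuristic tutors are trained directly on the raw environment below.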
reward_logs = {
"n_steps": n_steps,
"n_items": n_items,
"discount": discount,
"const_delay": const_delay,
"n_reps": n_reps,
"n_eps": n_eps,
"reward_funcs": reward_funcs,
}
for i, (tutor_name, build_tutor) in enumerate(tutor_builders):
print(f"Training {tutor_name}")
rewards = load_rewards(tutor_name, data_dir)
for h, (base_env_name, base_env) in enumerate(envs):
for m, reward_func in enumerate(reward_funcs):
env_name = (
base_env_name + "-" + ("L" if reward_func == "likelihood" else "LL")
)
print(f"Environment: {env_name}")
if env_name in rewards.keys() and not override:
print("Skipping\n")
continue
R = np.zeros((n_eps, n_reps))
for j in tqdm(range(n_reps)):
np.random.seed(j)
env = base_env(**env_kwargs, reward_func=reward_func)
if build_tutor in rl_tutors:
rl_env = make_rl_student_env(env)
agent = build_tutor(n_items)
R[:, j] = agent.train(rl_env, n_eps=n_eps, seed=j)
else:
if "Thresh" in tutor_name:
agent = build_tutor(n_items, env=env)
else:
agent = build_tutor(n_items)
R[:, j] = agent.train(env, n_eps=n_eps)
rewards[env_name] = R
reward_logs["rewards"] = rewards
with open(os.path.join(data_dir, f"{tutor_name}_reward_logs.pkl"), "wb") as f:
pickle.dump(reward_logs, f, pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
| 33.419643 | 94 | 0.583222 | ["MIT"] | ManavR123/cs_285_project | deeptutor/scripts/run.py | 3,743 | Python |
from raachem.file_class.gjf import *
from raachem.file_class.inp import *
from raachem.file_class.xyz import *
from raachem.file_class.log import *
from raachem.file_creator.e_analysis import *
from raachem.file_creator.input import *
from raachem.file_creator.xyz import *
from raachem.file_creator.deploy_scripts import *
from raachem.util.gen_purp import *
| 33.727273 | 50 | 0.800539 | ["MIT"] | ricalmang/raachem | raachem/__init__.py | 371 | Python |
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyDecorator(PythonPackage):
"""The aim of the decorator module it to simplify the usage of decorators
for the average programmer, and to popularize decorators by showing
various non-trivial examples."""
homepage = "https://github.com/micheles/decorator"
url = "https://pypi.io/packages/source/d/decorator/decorator-4.4.0.tar.gz"
version('4.4.0', sha256='86156361c50488b84a3f148056ea716ca587df2f0de1d34750d35c21312725de')
version('4.3.0', sha256='c39efa13fbdeb4506c476c9b3babf6a718da943dab7811c206005a4a956c080c')
version('4.0.9', sha256='90022e83316363788a55352fe39cfbed357aa3a71d90e5f2803a35471de4bba8')
depends_on('python@2.6:2.8,3.2:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
| 42.782609 | 95 | 0.748984 | ["ECL-2.0", "Apache-2.0", "MIT"] | CSCfi/spack | var/spack/repos/builtin/packages/py-decorator/package.py | 984 | Python |
"""Support for Aqualink pool lights."""
from iaqualink import AqualinkLightEffect
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_EFFECT,
DOMAIN,
SUPPORT_BRIGHTNESS,
SUPPORT_EFFECT,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from . import AqualinkEntity, refresh_system
from .const import DOMAIN as AQUALINK_DOMAIN
PARALLEL_UPDATES = 0
async def async_setup_entry(
hass: HomeAssistant, config_entry: ConfigEntry, async_add_entities
) -> None:
"""Set up discovered lights."""
devs = []
for dev in hass.data[AQUALINK_DOMAIN][DOMAIN]:
devs.append(HassAqualinkLight(dev))
async_add_entities(devs, True)
class HassAqualinkLight(AqualinkEntity, LightEntity):
"""Representation of a light."""
@property
def name(self) -> str:
"""Return the name of the light."""
return self.dev.label
@property
def is_on(self) -> bool:
"""Return whether the light is on or off."""
return self.dev.is_on
@refresh_system
async def async_turn_on(self, **kwargs) -> None:
"""Turn on the light.
This handles brightness and light effects for lights that do support
them.
"""
brightness = kwargs.get(ATTR_BRIGHTNESS)
effect = kwargs.get(ATTR_EFFECT)
# For now I'm assuming lights support either effects or brightness.
if effect:
effect = AqualinkLightEffect[effect].value
await self.dev.set_effect(effect)
elif brightness:
# Aqualink supports percentages in 25% increments.
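            # e.g. a Home Assistant brightness of 128 maps to
            # round(128 * 4 / 255) * 25 = 50 percent.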
pct = int(round(brightness * 4.0 / 255)) * 25
await self.dev.set_brightness(pct)
else:
await self.dev.turn_on()
@refresh_system
async def async_turn_off(self, **kwargs) -> None:
"""Turn off the light."""
await self.dev.turn_off()
@property
def brightness(self) -> int:
"""Return current brightness of the light.
The scale needs converting between 0-100 and 0-255.
"""
return self.dev.brightness * 255 / 100
@property
def effect(self) -> str:
"""Return the current light effect if supported."""
return AqualinkLightEffect(self.dev.effect).name
@property
def effect_list(self) -> list:
"""Return supported light effects."""
return list(AqualinkLightEffect.__members__)
@property
def supported_features(self) -> int:
"""Return the list of features supported by the light."""
if self.dev.is_dimmer:
return SUPPORT_BRIGHTNESS
if self.dev.is_color:
return SUPPORT_EFFECT
return 0
| 28.22449 | 76 | 0.649313 | ["Apache-2.0"] | 0xFEEDC0DE64/homeassistant-core | homeassistant/components/iaqualink/light.py | 2,766 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from collections import defaultdict
import argparse
import cld2
import langid
import sys
""" Removes some wrongly aligned pairs from hunalign output """
class LanguageIdentifier(object):
def __init__(self, use_cld2, valid_languages=None):
self.use_cld2 = use_cld2
self.valid_languages = [l.lower() for l in valid_languages]
if not use_cld2 and valid_languages:
langid.set_languages(self.valid_languages)
def is_language(self, s, expected_lang):
""" Check if the language of the segment cannot be reliably identified
as another language. If another than the expected language is
detected return False """
expected_lang = expected_lang.lower()
if self.valid_languages:
assert expected_lang in self.valid_languages
if self.use_cld2:
reliable, _text_bytes, details = cld2.detect(
s.encode("utf-8"),
isPlainText=True,
useFullLangTables=True,
bestEffort=True)
if reliable:
for _lang, langcode, confidence, score in details:
if langcode == expected_lang and confidence >= 10:
return True
return False
else: # unreliable is still counted as OK
return True
else:
            lang, confidence = langid.classify(s.lower())
if lang != expected_lang and confidence > 0.9:
# confidence for wrong language higher than 90%
return False
else:
return True
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('infile', nargs='?', type=argparse.FileType('r'),
default=sys.stdin)
parser.add_argument('outfile', nargs='?', type=argparse.FileType('w'),
default=sys.stdout)
parser.add_argument('-deleted', help='file to keep deleted lines',
type=argparse.FileType('w'))
parser.add_argument('-minscore', type=float, default=0,
help='minimum score from hunalign')
parser.add_argument('-slang', '--lang1', help='source language',
dest='source_lang', default='en')
parser.add_argument('-tlang', '--lang2', help='target language',
dest='target_lang', default='fr')
parser.add_argument('-cld2', help='use CLD2 instead of langid.py',
action='store_true')
args = parser.parse_args()
deletions = defaultdict(list)
n_written = 0
n_total = 0
lid = LanguageIdentifier(args.cld2, [args.source_lang, args.target_lang])
for line in args.infile:
n_total += 1
score = 1.0
split_line = line.rstrip('\n').split("\t")
if len(split_line) == 5:
split_line = split_line[-3:]
if len(split_line) == 3:
source, target, score = split_line
else:
assert len(split_line) == 2
source, target = split_line
source = source.decode('utf-8', 'ignore')
target = target.decode('utf-8', 'ignore')
if source == target:
deletions["identical"].append(target)
continue
if not source.strip():
deletions["source_empty"].append('')
continue
elif not target.strip():
deletions["target_empty"].append('')
continue
if float(score) < args.minscore:
deletions["low score"].append("\t".join((source, target, score)))
continue
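        # Length-ratio filter; the +15 offset presumably softens the ratio for
        # very short segments so they are not discarded too aggressively.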
if float((len(source) + 15)) / float(len(target) + 15) > 1.5:
deletions["source_too_long"].append("%s\t%s" % (source, target))
continue
if float((len(target) + 15)) / float(len(source) + 15) > 1.5:
deletions["source_too_short"].append("%s\t%s" % (source, target))
continue
if not lid.is_language(source, args.source_lang):
deletions["source_lang"].append(source)
continue
if not lid.is_language(target, args.target_lang):
deletions["target_lang"].append(target)
continue
args.outfile.write(line)
n_written += 1
if args.deleted:
args.deleted.write("Written: %d of %d = %f percent\n" %
(n_written, n_total,
100. * n_written / max((1, n_total))))
for reason, deleted in deletions.iteritems():
args.deleted.write("Deleted %d items due to %s\n"
% (len(deleted), reason))
for line in deleted:
if line.strip():
args.deleted.write("\t%s\n" % line.encode('utf-8'))
| 37.921875 | 78 | 0.563865 | ["Apache-2.0"] | christianbuck/CorpusMining | baseline/filter_hunalign_bitext.py | 4,854 | Python |
"""Define abstract base classes to construct FileFinder classes."""
import os
import shutil
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Sequence, Union
import mne_bids
@dataclass
class FileFinder(ABC):
"""Basic representation of class for finding and filtering files."""
hemispheres: Union[dict, None] = field(default_factory=dict)
directory: Union[Path, str] = field(init=False)
files: list = field(init=False, default_factory=list)
def __str__(self):
if not self.files:
return "No corresponding files found."
headers = ["Index", "Filename"]
col_width = max(len(os.path.basename(file)) for file in self.files)
format_row = f"{{:>{len(headers[0]) + 2}}}{{:>{col_width + 2}}}"
terminal_size = "\u2500" * shutil.get_terminal_size().columns
return "\n".join(
(
"Corresponding files found:",
"".join(
f"{{:>{len(header) + 2}}}".format(header)
for header in headers
),
terminal_size,
*(
format_row.format(idx, os.path.basename(file))
for idx, file in enumerate(self.files)
),
)
)
def __len__(self) -> int:
if not self.files:
return 0
return len(self.files)
@abstractmethod
def find_files(
self,
directory: Union[str, Path],
extensions: Optional[Union[Sequence, str]] = None,
keywords: Optional[Union[list, str]] = None,
hemisphere: Optional[str] = None,
stimulation: Optional[str] = None,
medication: Optional[str] = None,
exclude: Optional[Union[str, list]] = None,
verbose: bool = False,
) -> None:
"""Find files in directory with optional
keywords and extensions."""
@abstractmethod
def filter_files(
self,
keywords: Optional[Union[str, list]] = None,
hemisphere: Optional[str] = None,
stimulation: Optional[str] = None,
medication: Optional[str] = None,
exclude: Optional[Union[str, list]] = None,
verbose: bool = False,
) -> None:
"""Filter list of filepaths for given parameters."""
@staticmethod
def _keyword_search(
files: list[str], keywords: Optional[Union[str, list]]
) -> list:
if not keywords:
return files
if not isinstance(keywords, list):
keywords = [keywords]
filtered_files = [
file for file in files if any(key in file for key in keywords)
]
return filtered_files
def _find_files(
self,
directory: Union[Path, str],
extensions: Optional[Union[list, str]] = None,
) -> None:
"""Find files in directory with optional extensions.
Args:
directory (string)
keywords (list): e.g. ["SelfpacedRota", "ButtonPress] (optional)
extensions (list): e.g. [".json" or "tsv"] (optional)
verbose (bool): verbosity level (optional, default=True)
"""
files = []
for root, _, fnames in os.walk(directory):
fnames = [os.path.join(root, file) for file in fnames]
fnames = self._keyword_search(fnames, extensions)
if fnames:
files.extend(fnames)
self.files = files
def _filter_files(
self,
keywords: Optional[Union[str, list[str]]] = None,
hemisphere: Optional[str] = None,
stimulation: Optional[str] = None,
medication: Optional[str] = None,
exclude: Optional[Union[str, list[str]]] = None,
) -> None:
"""Filter filepaths for given parameters."""
filtered_files = self.files
if exclude:
if not isinstance(exclude, list):
exclude = [exclude]
filtered_files = [
file
for file in filtered_files
if not any(item in file for item in exclude)
]
if keywords:
if not isinstance(keywords, list):
keywords = [keywords]
filtered_files = self._keyword_search(filtered_files, keywords)
if stimulation:
if stimulation.lower() in "stimon":
stim = "StimOn"
elif stimulation.lower() in "stimoff":
stim = "StimOff"
else:
raise ValueError("Keyword for stimulation not valid.")
filtered_files = self._keyword_search(filtered_files, [stim])
if medication:
if medication.lower() in "medon":
med = "MedOn"
elif medication.lower() in "medoff":
med = "MedOff"
else:
raise ValueError("Keyword for medication not valid.")
filtered_files = self._keyword_search(filtered_files, [med])
if hemisphere:
matching_files = []
for file in filtered_files:
subject = mne_bids.get_entities_from_fname(file)["subject"]
if (
subject not in self.hemispheres
or self.hemispheres[subject] is None
):
raise HemisphereNotSpecifiedError(
subject, self.hemispheres
)
hem = self.hemispheres[subject] + "_"
if hemisphere.lower() in "ipsilateral" and hem in file:
matching_files.append(file)
if hemisphere.lower() in "contralateral" and hem not in file:
matching_files.append(file)
filtered_files = matching_files
self.files = filtered_files
class DirectoryNotFoundError(Exception):
"""Exception raised when invalid Reader is passed.
Attributes:
directory -- input directory which caused the error
"""
def __init__(
self,
directory: Union[Path, str],
message="Input directory was not found.",
):
self.directory = directory
self.message = message
super().__init__(self.message)
def __str__(self):
return f"{self.message} Got: {self.directory}."
class HemisphereNotSpecifiedError(Exception):
"""Exception raised when electrode hemisphere is not specified in settings.
Attributes:
subject -- input subject which caused the error
hemisphere -- specified hemispheres
message -- explanation of the error
"""
def __init__(
self,
subject,
hemispheres,
message=(
"Input ECOG hemisphere is not specified in"
" `filefinder_settings.py` for given subject."
),
) -> None:
self.subject = subject
self.hemispheres = hemispheres
self.message = message
super().__init__(self.message)
def __str__(self):
return (
f"{self.message} Unspecified subject: {self.subject}."
f" Specified hemispheres: {self.hemispheres}."
)
| 33.439815 | 79 | 0.56417 | ["MIT"] | richardkoehler/pte | src/pte/filetools/filefinder_abc.py | 7,223 | Python |
__all__ = ("group_attempts", "fails_filter", "reduce_to_failures",)
def group_attempts(sequence, filter_func=None):
if filter_func is None:
filter_func = lambda x:True
last, l = None, []
for x in sequence:
if isinstance(x, tuple) and x[0] == 'inspecting':
if l:
yield last, l
last, l = x[1], []
elif last is not None:
if filter_func(x):
# inline ignored frames
if getattr(x, 'ignored', False):
l.extend(y for y in x.events if filter_func(y))
else:
l.append(x)
if l:
yield last, l
def fails_filter(x):
if not isinstance(x, tuple):
return not x.succeeded
if x[0] == "viable":
return not x[1]
return x[0] != "inspecting"
def reduce_to_failures(frame):
if frame.succeeded:
return []
l = [frame]
for pkg, nodes in group_attempts(frame.events, fails_filter):
l2 = []
for x in nodes:
if not isinstance(x, tuple):
l2.append(reduce_to_failures(x))
else:
l2.append(x)
l.append((pkg, l2))
return l
| 28.162791 | 67 | 0.521883 | ["BSD-3-Clause"] | CyberTailor/pkgcore | src/pkgcore/resolver/util.py | 1,211 | Python |
#!/usr/bin/python -u
# -*- coding: latin-1 -*-
#
# Dinner problem in Z3
#
# From http://www.sellsbrothers.com/spout/#The_Logic_of_Logic
# """
# My son came to me the other day and said, "Dad, I need help with a
# math problem." The problem went like this:
#
# * We're going out to dinner taking 1-6 grandparents, 1-10 parents and/or 1-40 children
# * Grandparents cost $3 for dinner, parents $2 and children $0.50
# * There must be 20 total people at dinner and it must cost $20
# * How many grandparents, parents and children are going to dinner?
# """
#
# This Z3 model was written by Hakan Kjellerstrand (hakank@gmail.com)
# See also my Z3 page: http://hakank.org/z3/
#
from z3_utils_hakank import *
sol = Solver()
n = 3
# variables
# x = makeIntVector(sol, "x", 3, 1, 100)
# x = makeRealVector(sol, "x", 3, 1, 100)
# Grandparents, Parents, Children = x
Grandparents = makeRealVar(sol,"Grandparents", 1,6)
Parents = makeRealVar(sol,"Parents", 1,10)
Children = makeRealVar(sol,"Children", 1,40)
# constraints
#
# sol.add(3*Grandparents + 2*Parents + Children/2 == 20)
# multiply with 2:
sol.add(Grandparents * 6 + Parents * 4 + Children * 1 == 40)
# Grandparents + Parents + Children = 20 /\
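# Quick arithmetic check: if the commented-out head-count constraint above were
# also enforced, the only all-integer solution within the stated bounds would be
# 1 grandparent, 5 parents and 14 children, since 3*1 + 2*5 + 0.5*14 = 20 dollars
# and 1 + 5 + 14 = 20 people.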
num_solutions = 0
while sol.check() == sat:
num_solutions += 1
mod = sol.model()
print([mod.eval(x) for x in [Grandparents,Parents,Children]])
getDifferentSolution(sol,mod,[Grandparents,Parents,Children])
if num_solutions > 5:
break;
print('num_solutions:', num_solutions)
| 27.981481 | 88 | 0.68233 | ["MIT"] | Wikunia/hakank | z3/dinner.py | 1,511 | Python |
# -----------------------------------------------------------------------------
# Libraries
# -----------------------------------------------------------------------------
# Core libs
from typing import TYPE_CHECKING
# Third party libs
from rest_framework.generics import ListCreateAPIView, RetrieveUpdateDestroyAPIView
# Project libs
from apps.users.models import ClientAddress
from apps.users.serializers.client_address import (
ClientAddressCreateSerializer,
ClientAddressRetrieveSerializer,
)
# Imports needed only for type checking (none at the moment)
if TYPE_CHECKING:
pass
# -----------------------------------------------------------------------------
# Constants
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Functions
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# Classes
# -----------------------------------------------------------------------------
class ClientAddressCreateListView(ListCreateAPIView):
queryset = ClientAddress.objects.all()
serializer_class = ClientAddressCreateSerializer
class ClientAddressRetrieveUpdateView(RetrieveUpdateDestroyAPIView):
queryset = ClientAddress.objects.all()
serializer_class = ClientAddressRetrieveSerializer
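# A routing sketch (not part of the original module); the URL paths and names are
# hypothetical and only show how these generic views are typically wired into a urls.py.
def _demo_urlpatterns():
    from django.urls import path

    return [
        path("client-addresses/", ClientAddressCreateListView.as_view(), name="client-address-list"),
        path("client-addresses/<int:pk>/", ClientAddressRetrieveUpdateView.as_view(), name="client-address-detail"),
    ]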
| 33.357143 | 83 | 0.443969 | [
"MIT"
] | leonardon473/my-dinner-backend | src/apps/users/views/rest/client_address.py | 1,401 | Python |
import pyasdf
import numpy as np
import scipy.fftpack
import matplotlib.pyplot as plt
'''
this script takes a chunk of noise spectra for a station pair and
compares their cross-correlation functions computed using two schemes:
one averages in the frequency domain and the other averages in the
time domain
'''
def cross_correlation1(fft1,fft2,maxlag,dt,Nfft):
#------convert all 2D arrays into 1D to speed up--------
corr = np.zeros(fft1.shape,dtype=np.complex64)
corr = np.conj(fft1) * fft2
ncorr = np.zeros((fft1.shape[0],Nfft),dtype=np.complex64)
ncorr[:,:Nfft//2] = corr[:,:]
ncorr[:,-(Nfft//2)+1:]=np.flip(np.conj(ncorr[:,1:(Nfft//2)]),axis=1)
ncorr[:,0]=complex(0,0)
ncorr = np.real(np.fft.ifftshift(scipy.fftpack.ifft(ncorr, Nfft, axis=1)))
tcorr = np.arange(-Nfft//2 + 1, Nfft//2)*dt
ind = np.where(np.abs(tcorr) <= maxlag)[0]
ncorr = ncorr[:,ind]
ncorr = np.mean(ncorr,axis=0)
return ncorr
def cross_correlation2(fft1,fft2,maxlag,dt,Nfft):
#------convert all 2D arrays into 1D to speed up--------
corr = np.zeros(fft1.shape,dtype=np.complex64)
corr = np.conj(fft1) * fft2
ncorr = np.zeros(shape=Nfft,dtype=np.complex64)
ncorr[:Nfft//2] = np.mean(corr,axis=0)
ncorr[-(Nfft//2)+1:]=np.flip(np.conj(ncorr[1:(Nfft//2)]),axis=0)
ncorr[0]=complex(0,0)
ncorr = np.fft.ifftshift(scipy.fftpack.ifft(ncorr, Nfft, axis=0))
print(ncorr.real,ncorr.imag)
tcorr = np.arange(-Nfft//2 + 1, Nfft//2)*dt
ind = np.where(np.abs(tcorr) <= maxlag)[0]
ncorr = ncorr[ind]
return ncorr
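# A self-contained sketch (not part of the original script) comparing the two averaging
# schemes on synthetic spectra, so the check can be exercised without the ASDF files
# referenced below; window count, FFT length, dt and maxlag are arbitrary choices.
def demo_compare_schemes(nwin=20, nfft=1024, dt=0.05, maxlag=10.0):
    rng = np.random.default_rng(0)
    shape = (nwin, nfft // 2)
    spec1 = rng.standard_normal(shape) + 1j * rng.standard_normal(shape)
    spec2 = rng.standard_normal(shape) + 1j * rng.standard_normal(shape)
    corr_time = cross_correlation1(spec1, spec2, maxlag, dt, nfft)  # average after ifft
    corr_freq = cross_correlation2(spec1, spec2, maxlag, dt, nfft)  # average before ifft
    return corr_time, corr_freq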
#-----common parameters------
iday = '2010_01_10'
icomp = 'EHZ'
dt = 0.05
maxlag = 800
sfile1 = '/Users/chengxin/Documents/Harvard/Kanto_basin/code/KANTO/FFT/N.AC2H.h5'
sfile2 = '/Users/chengxin/Documents/Harvard/Kanto_basin/code/KANTO/FFT/N.CHHH.h5'
#-----------reading the data------------
ds1 = pyasdf.ASDFDataSet(sfile1,mode='r')
ds2 = pyasdf.ASDFDataSet(sfile2,mode='r')
spect1 = ds1.auxiliary_data[icomp][iday].data[:]
spect2 = ds2.auxiliary_data[icomp][iday].data[:]
std1 = ds1.auxiliary_data[icomp][iday].parameters['std']
std2 = ds2.auxiliary_data[icomp][iday].parameters['std']
nwin = spect1.shape[0]
nfft = spect1.shape[1]*2
print('data dimension for spect1 and spect2 are %d and %d' % (spect1.ndim,spect2.ndim))
#------select the sections-------
indx1 = np.where(std1<10)[0]
indx2 = np.where(std2<10)[0]
bb=np.intersect1d(indx1,indx2)
print(spect1[bb,:],spect2[bb,:])
corr1=cross_correlation1(spect1[bb,:],spect2[bb,:],np.round(maxlag),dt,nfft)
corr2=cross_correlation2(spect1[bb,:],spect2[bb,:],np.round(maxlag),dt,nfft)
#---plotting----
plt.subplot(311)
plt.plot(corr1)
plt.subplot(312)
plt.plot(corr2)
plt.subplot(313)
plt.plot(corr2)
plt.plot(corr1)
plt.show() | 31.896552 | 87 | 0.670991 | [
"MIT"
] | Denolle-Lab/NoisePy | test/data_check/check_linearity_fft.py | 2,775 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ... import opcodes as OperandDef
from ..datasource import tensor as astensor
from .core import TensorRealIFFTNMixin, validate_fftn, TensorRealFFTN
class TensorIRFFT2(TensorRealFFTN, TensorRealIFFTNMixin):
_op_type_ = OperandDef.IRFFT2
def __init__(self, shape=None, axes=None, norm=None, **kw):
super().__init__(_shape=shape, _axes=axes, _norm=norm, **kw)
def irfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse FFT of a real array.
Parameters
----------
a : array_like
The input tensor
s : sequence of ints, optional
Shape of the inverse FFT.
axes : sequence of ints, optional
The axes over which to compute the inverse fft.
Default is the last two axes.
norm : {None, "ortho"}, optional
Normalization mode (see `mt.fft`). Default is None.
Returns
-------
out : Tensor
The result of the inverse real 2-D FFT.
See Also
--------
irfftn : Compute the inverse of the N-dimensional FFT of real input.
Notes
-----
This is really `irfftn` with different defaults.
For more details see `irfftn`.
"""
if len(axes) != 2:
raise ValueError("axes length should be 2")
a = astensor(a)
axes = validate_fftn(a, s=s, axes=axes, norm=norm)
op = TensorIRFFT2(shape=s, axes=axes, norm=norm, dtype=np.dtype(np.float_))
return op(a)
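# A usage sketch (not part of the original module); it assumes the Mars tensor
# `.execute()` API to trigger computation, and the array shape is arbitrary.
def _demo_irfft2():
    freq = np.fft.rfft2(np.random.rand(8, 6))  # real 2-D spectrum as a NumPy array
    restored = irfft2(freq, s=(8, 6))          # NumPy input is accepted via astensor()
    return restored.execute()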
| 30.720588 | 79 | 0.674485 | [
"Apache-2.0"
] | JeffroMF/mars | mars/tensor/fft/irfft2.py | 2,089 | Python |
"""WebPush Style Autopush Router
This router handles notifications that should be dispatched to an Autopush
node, or stores each individual message, along with its data, in a Message
table for retrieval by the client.
"""
import json
import time
from StringIO import StringIO
from typing import Any # noqa
from botocore.exceptions import ClientError
from twisted.internet.threads import deferToThread
from twisted.web.client import FileBodyProducer
from twisted.internet.defer import (
inlineCallbacks,
returnValue,
CancelledError,
)
from twisted.internet.error import (
ConnectError,
ConnectionClosed,
ConnectionRefusedError,
)
from twisted.logger import Logger
from twisted.web._newclient import ResponseFailed
from twisted.web.http import PotentialDataLoss
from autopush.exceptions import ItemNotFound, RouterException
from autopush.metrics import make_tags
from autopush.protocol import IgnoreBody
from autopush.router.interface import RouterResponse
from autopush.types import JSONDict # noqa
TTL_URL = "https://webpush-wg.github.io/webpush-protocol/#rfc.section.6.2"
class WebPushRouter(object):
"""Implements :class: `autopush.router.interface.IRouter` for internal
routing to an autopush node
"""
log = Logger()
def __init__(self, conf, router_conf, db, agent):
"""Create a new Router"""
self.conf = conf
self.router_conf = router_conf
self.db = db
self.agent = agent
@property
def metrics(self):
return self.db.metrics
def register(self, uaid, router_data, app_id, *args, **kwargs):
# type: (str, JSONDict, str, *Any, **Any) -> None
"""No additional routing data"""
def amend_endpoint_response(self, response, router_data):
# type: (JSONDict, JSONDict) -> None
"""Stubbed out for this router"""
@inlineCallbacks
def route_notification(self, notification, uaid_data):
"""Route a notification to an internal node, and store it if the node
can't deliver immediately or is no longer a valid node
"""
# Determine if they're connected at the moment
node_id = uaid_data.get("node_id")
uaid = uaid_data["uaid"]
router = self.db.router
# Node_id is present, attempt delivery.
# - Send Notification to node
# - Success: Done, return 200
# - Error (Node busy): Jump to Save notification below
# - Error (Client gone, node gone/dead): Clear node entry for user
# - Both: Done, return 503
if node_id:
result = None
try:
result = yield self._send_notification(uaid, node_id,
notification)
except (ConnectError, ConnectionClosed, ResponseFailed,
CancelledError, PotentialDataLoss) as exc:
self.metrics.increment("updates.client.host_gone")
yield deferToThread(router.clear_node,
uaid_data).addErrback(self._eat_db_err)
if isinstance(exc, ConnectionRefusedError):
# Occurs if an IP record is now used by some other node
                    # in AWS or if the connection times out.
self.log.debug("Could not route message: {exc}", exc=exc)
if result and result.code == 200:
returnValue(self.delivered_response(notification))
# Save notification, node is not present or busy
# - Save notification
# - Success (older version): Done, return 202
# - Error (db error): Done, return 503
try:
yield self._save_notification(uaid_data, notification)
except ClientError as e:
log_exception = (e.response["Error"]["Code"] !=
"ProvisionedThroughputExceededException")
raise RouterException("Error saving to database",
status_code=503,
response_body="Retry Request",
log_exception=log_exception,
errno=201)
# - Lookup client again to get latest node state after save.
# - Success (node found): Notify node of new notification
# - Success: Done, return 200
# - Error (no client): Done, return 202
# - Error (no node): Clear node entry
# - Both: Done, return 202
# - Success (no node): Done, return 202
# - Error (db error): Done, return 202
# - Error (no client) : Done, return 404
try:
uaid_data = yield deferToThread(router.get_uaid, uaid)
except ClientError:
returnValue(self.stored_response(notification))
except ItemNotFound:
self.metrics.increment("updates.client.deleted")
raise RouterException("User was deleted",
status_code=410,
response_body="Invalid UAID",
log_exception=False,
errno=105)
# Verify there's a node_id in here, if not we're done
node_id = uaid_data.get("node_id")
if not node_id:
returnValue(self.stored_response(notification))
try:
result = yield self._send_notification_check(uaid, node_id)
except (ConnectError, ConnectionClosed, ResponseFailed) as exc:
self.metrics.increment("updates.client.host_gone")
if isinstance(exc, ConnectionRefusedError):
self.log.debug("Could not route message: {exc}", exc=exc)
yield deferToThread(
router.clear_node,
uaid_data).addErrback(self._eat_db_err)
returnValue(self.stored_response(notification))
if result.code == 200:
returnValue(self.delivered_response(notification))
else:
ret_val = self.stored_response(notification)
returnValue(ret_val)
def delivered_response(self, notification):
self.metrics.increment("notification.message_data",
notification.data_length,
tags=make_tags(destination='Stored'))
location = "%s/m/%s" % (self.conf.endpoint_url, notification.location)
return RouterResponse(status_code=201, response_body="",
headers={"Location": location,
"TTL": notification.ttl or 0},
logged_status=200)
def stored_response(self, notification):
self.metrics.increment("notification.message_data",
notification.data_length,
tags=make_tags(destination='Direct'))
location = "%s/m/%s" % (self.conf.endpoint_url, notification.location)
return RouterResponse(status_code=201, response_body="",
headers={"Location": location,
"TTL": notification.ttl},
logged_status=202)
#############################################################
# Blocking Helper Functions
#############################################################
def _send_notification(self, uaid, node_id, notification):
"""Send a notification to a specific node_id
        This version of the overridden method includes the necessary crypto
headers for the notification.
:type notification: autopush.utils.WebPushNotification
"""
payload = notification.serialize()
payload["timestamp"] = int(time.time())
url = node_id + "/push/" + uaid
request = self.agent.request(
"PUT",
url.encode("utf8"),
bodyProducer=FileBodyProducer(StringIO(json.dumps(payload))),
)
request.addCallback(IgnoreBody.ignore)
return request
def _send_notification_check(self, uaid, node_id):
"""Send a command to the node to check for notifications"""
url = node_id + "/notif/" + uaid
return self.agent.request(
"PUT",
url.encode("utf8"),
).addCallback(IgnoreBody.ignore)
def _save_notification(self, uaid_data, notification):
"""Saves a notification, returns a deferred.
This version of the overridden method saves each individual message
to the message table along with relevant request headers if
available.
:type uaid_data: dict
"""
month_table = uaid_data["current_month"]
if notification.ttl is None:
            # Note that this URL is temporary, as is this warning: requests
            # with a missing TTL will eventually be rejected with a 400.
raise RouterException(
"Missing TTL Header",
response_body="Missing TTL Header, see: %s" % TTL_URL,
status_code=400,
errno=111,
log_exception=False,
)
if notification.ttl == 0:
location = "%s/m/%s" % (self.conf.endpoint_url,
notification.version)
raise RouterException("Finished Routing", status_code=201,
log_exception=False,
headers={"TTL": str(notification.ttl),
"Location": location},
logged_status=204)
return deferToThread(
self.db.message_table(month_table).store_message,
notification=notification,
)
#############################################################
# Error Callbacks
#############################################################
def _eat_db_err(self, fail):
"""errBack for ignoring provisioned throughput errors"""
fail.trap(ClientError)
| 40.898785 | 78 | 0.572362 | [
"MPL-2.0",
"MPL-2.0-no-copyleft-exception"
] | Acidburn0zzz/autopush | autopush/router/webpush.py | 10,102 | Python |
import json
import os
import pandas
import redis
import types
def json2redis(data,r):
if isinstance(data, types.ListType):
for row in data:
channel = row['channel']
data_type = row['data_type']
rkey = 'channel_{}_{}'.format(channel,data_type)
r.lpush(rkey,row)
else:
channel = data['channel']
data_type = data['data_type']
rkey = 'channel_{}_{}'.format(channel,data_type)
r.lpush(rkey,data)
# initialize redis connection for local and CF deployment
def connect_redis_db(redis_service_name = None):
if os.getenv('NODE_ENV') == 'micropcf':
DB_HOST = os.getenv('REDIS_HOST')
DB_PORT = os.getenv('REDIS_PORT')
DB_PW = os.getenv('REDIS_PASSWORD')
REDIS_DB = 0
elif os.environ.get('VCAP_SERVICES') is None: # running locally
DB_HOST = 'localhost'
DB_PORT = 6379
DB_PW = ''
REDIS_DB = 1
else: # running on CF
env_vars = os.environ['VCAP_SERVICES']
rediscloud_service = json.loads(env_vars)[redis_service_name][0]
credentials = rediscloud_service['credentials']
DB_HOST = credentials['host']
DB_PORT = credentials['port']
        DB_PW = credentials['password']
REDIS_DB = 0
return redis.StrictRedis(host=DB_HOST,
port=DB_PORT,
password=DB_PW,
db=REDIS_DB)
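# A minimal usage sketch (not part of the original helpers); 'rediscloud' is the assumed
# Cloud Foundry binding name, and with no CF/MicroPCF environment variables set the call
# falls back to localhost:6379.
def _demo_connect():
    r = connect_redis_db(redis_service_name='rediscloud')
    return r.ping()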
| 29.784314 | 72 | 0.574062 | [
"Apache-2.0"
] | pivotal-legacy/moves | train-app/helper_functions.py | 1,519 | Python |
#
# BSD 3-Clause License
#
# Copyright (c) 2019, Analog Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# This class is used to generate delay register writes
import re
class regwrite_generator(object):
def __init__(self, seq_file):
self.code_dict = {}
self.data_dict = {}
self.seq_file = seq_file
def create_code_dict(self, text):
reg = re.compile(r'([0-9a-f]{4} [0-9a-f]{4})')
rawinfo = re.findall(reg, text)
for x in rawinfo:
s_line = re.split(r'\s', x)
addr = int(s_line[0],16)
data = int(s_line[2],16)
self.code_dict[addr] = data
return self.code_dict
def create_seq_info(self):
data_name = ['PulseCount', 'LD1_Tap', 'LD2_Tap', 'LD3_Tap', 'LD4_Tap', 'LD5_Tap', 'Pos_Off', 'Vec_Off', 'Start_Loc', 'Tbl_Len']
reg = re.compile(r'([0-9a-zA-Z]+)')
myfile = open(self.seq_file, 'r')
for line in myfile:
rawInfo = re.findall(reg, line)
if len(rawInfo) == 1:
currLabel = rawInfo[0]
if len(rawInfo) == 4:
curr_mode = rawInfo[1]
curr_seq = rawInfo[3]
i = 0
if curr_mode in self.data_dict:
self.data_dict[curr_mode][curr_seq] = {}
else:
self.data_dict[curr_mode] = {}
self.data_dict[curr_mode][curr_seq] = {}
for i in range(10):
rawInfo = re.findall(reg, myfile.readline())
self.data_dict[curr_mode][curr_seq][data_name[i]] = [int(rawInfo[0], 16), int(rawInfo[1], 16)]
myfile.close()
return self.data_dict
# Given mode, sweep specified ld for all sequences
def delay_sequences(self, mode, delay, ld):
delay_writes = {}
for x in self.data_dict[str(mode)]:
writes = self.delay_sequence_ld(delay, ld, self.data_dict[str(mode)][x])
delay_writes = dict(delay_writes, **writes)
return delay_writes
def generate_delay_writes(self, mode, delay_min, delay_max, ld):
writes_dict = {}
for x in range(delay_min, delay_max):
writes_dict[x] = self.delay_sequences(mode, x, ld)
return writes_dict
def setbit(self, bit, vec):
bit = 1 << bit
vec = vec | bit
return vec
def unsetbit(self, bit, vec):
bit = 1 << bit
bit = ~bit
vec = vec & bit
return vec
def get_blanking_values(self, ld, seq_dict):
pos_len = seq_dict['Tbl_Len'][1] & 0x00ff
vec_len = (seq_dict['Tbl_Len'][1] & 0xff00) >> 8
if pos_len != vec_len:
print('Table length not equal')
start_loc = seq_dict['Start_Loc'][1]
pos_len = seq_dict['Tbl_Len'][1] & 0x00ff
vec_len = (seq_dict['Tbl_Len'][1] & 0xff00) >> 8
pos_ptr = (seq_dict['Pos_Off'][1] * 2) + 0x4000
vec_ptr = (seq_dict['Vec_Off'][1] * 2) + 0x4000
blk_pos = -1
blk_neg = -1
for i in range(vec_len):
curr_vec = self.code_dict[vec_ptr + i]
if ((curr_vec >> (ld - 1)) & 0x0001) == 1:
if blk_pos == -1:
blk_pos = i
elif blk_neg == -1:
blk_neg = i
start_pos = start_loc + 2
pos_tbl = []
for i in range(pos_len):
if i == 0:
pos_tbl.append(self.code_dict[pos_ptr+i] + start_pos)
else:
pos_tbl.append(self.code_dict[pos_ptr+i] + pos_tbl[i-1])
blk_pos = pos_tbl[blk_pos]
blk_neg = pos_tbl[blk_neg]
return blk_pos, blk_neg
# Delay Sequence LD
def delay_sequence_ld(self, delay, ld, seq_dict):
taps = seq_dict['LD' + str(ld) + '_Tap'][1]
taps_addr = seq_dict['LD' + str(ld) + '_Tap'][0]
tap_pos = taps & 0x00ff
tap_neg = (taps & 0xff00) >> 8
blk_pos, blk_neg = self.get_blanking_values(ld, seq_dict)
blk_pos_shift = 0
blk_neg_shift = 0
tap_pos = tap_pos + delay
tap_neg = tap_neg + delay
while tap_pos >= 128:
blk_pos_shift += 1
tap_pos -= 128
while tap_neg >= 128:
blk_neg_shift += 1
tap_neg -= 128
while tap_pos < 0:
blk_pos_shift -= 1
tap_pos += 128
while tap_neg < 0:
blk_neg_shift -= 1
tap_neg += 128
blk_pos = blk_pos + blk_pos_shift
blk_neg = blk_neg + blk_neg_shift
tap_write = {}
tap_write[hex(taps_addr)] = (tap_neg << 8) + tap_pos
blk_writes = self.set_blanking_values(blk_pos, blk_neg, ld, seq_dict)
writes = dict(tap_write, **blk_writes)
return writes
# Set blanking vals
def set_blanking_values(self, blk_pos, blk_neg, ld, seq_dict):
start_loc = seq_dict['Start_Loc'][1]
pos_len = seq_dict['Tbl_Len'][1] & 0x00ff
vec_len = (seq_dict['Tbl_Len'][1] & 0xff00) >> 8
pos_ptr = (seq_dict['Pos_Off'][1] * 2) + 0x4000
vec_ptr = (seq_dict['Vec_Off'][1] * 2) + 0x4000
start_pos = start_loc + 2
pos_tbl = []
for i in range(pos_len):
if i == 0:
pos_tbl.append(self.code_dict[pos_ptr+i] + start_pos)
else:
pos_tbl.append(self.code_dict[pos_ptr+i] + pos_tbl[i-1])
blk_pos_loc = pos_tbl.index(blk_pos)
blk_neg_loc = pos_tbl.index(blk_neg)
blk_writes = {}
for i in range(vec_len):
if i == blk_pos_loc:
curr_vec = self.setbit(ld-1, self.code_dict[vec_ptr + i])
elif i == blk_neg_loc:
curr_vec = self.setbit(ld-1, self.code_dict[vec_ptr + i])
else:
curr_vec = self.unsetbit(ld-1, self.code_dict[vec_ptr + i])
blk_writes[hex(vec_ptr + i)] = curr_vec
return blk_writes
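# A usage sketch (not part of the original tool); the file names, mode, LD number and
# delay range are hypothetical and only illustrate the intended call order.
def _demo_generate_writes():
    gen = regwrite_generator('mode_seq_info.txt') # sequence description file
    with open('register_dump.txt') as fh:
        gen.create_code_dict(fh.read()) # parse "addr data" hex pairs
    gen.create_seq_info() # parse per-mode sequence tables
    # sweep LD1 delays 0..3 for mode 3 and collect the resulting register writes
    return gen.generate_delay_writes(3, 0, 4, 1)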
| 34.963303 | 135 | 0.575571 | [
"BSD-3-Clause"
] | AkshayKurhade/aditof_sdk | tools/calibration-96tof1/tof_calib/regwrite_generator.py | 7,622 | Python |
#!usr/bin/env python
#-*- coding:utf-8 -*-
"""
@author: nico
@file: pipline.py
@time: 2018/05/05
"""
from django.contrib.auth import get_user_model
from bloguser.utils import get_image_from_url
from uuid import uuid4
User = get_user_model()
def save_bloguser_extra_profile(backend, user, response, *args, **kwargs):
"""
see more:
http://python-social-auth.readthedocs.io/en/latest/use_cases.html#retrieve-google-friends
http://python-social-auth.readthedocs.io/en/latest/pipeline.html
:param backend:
:param user:
:param response:
:param args:
:param kwargs:
:return:
"""
if backend.name == 'github':
        # Fetch the URL of the user's GitHub avatar and save a local copy
image_url = response.get('avatar_url')
image_file = get_image_from_url(image_url)
if image_file is not None:
            # Name the avatar file with a uuid
avatar_name = 'avatar' + uuid4().hex[:16]
if user.image == 'bloguser/avatar.png':
                # Only replace the avatar if it is still the default one; otherwise leave it untouched
user.image.save(avatar_name, image_file)
#user.image_url = image_url
user.save() | 25.444444 | 97 | 0.628821 | [
"BSD-3-Clause"
] | Jennei/MyBlog | apps/bloguser/pipline.py | 1,245 | Python |
def extractMichilunWordpressCom(item):
'''
Parser for 'michilun.wordpress.com'
'''
bad = [
'Recommendations and Reviews',
]
if any([tmp in item['tags'] for tmp in bad]):
return None
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Side Projects - Scheme of the Official Descendant', 'Scheme of the Official Descendant', 'translated'),
('Song in the Peach Blossoms', 'Song in the Peach Blossoms', 'translated'),
('Onrain (Online - The Novel)', 'Onrain (Online - The Novel)', 'translated'),
('At the End of the Wish', 'At the End of the Wish', 'translated'),
('Bringing Calamity to the Nation', 'Bringing Calamity to the Nation', 'translated'),
('Side Projects - The Flame\'s Daughter', 'The Flame\'s Daughter', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | 48.09375 | 145 | 0.499675 | [
"BSD-3-Clause"
] | fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractMichilunWordpressCom.py | 1,539 | Python |
import os
LUCKY_SEED = 42
TRAIN_FILE_COUNT = 43
VAL_FILE_COUNT = 12
ROOT_DIR = os.path.abspath(os.path.dirname(__file__))
OBJECTS_DIR = os.path.join(ROOT_DIR, "objects")
OUTPUTS_DIR = os.path.join(ROOT_DIR, "outputs")
LOGS_DIR = os.path.join(ROOT_DIR, "logs")
DATA_DIR = os.path.join(ROOT_DIR, "data")
RAW_DATA_DIR = os.path.join(DATA_DIR, "raw_data")
ORIG_DATA_DIR = os.path.join(RAW_DATA_DIR, "sa-emotions")
OTHERS_RAW_DATA = os.path.join(RAW_DATA_DIR, "others")
PROCESSED_DATA_DIR = os.path.join(DATA_DIR, "processed_data")
COMPLEX_PROCESSED_DATA_DIR = os.path.join(PROCESSED_DATA_DIR, "complex")
SIMPLE_PROCESSED_DATA_DIR = os.path.join(PROCESSED_DATA_DIR, "simple")
TEST_DATA_DIR = os.path.join(DATA_DIR, "testing_data")
TRAIN_DATA_DIR = os.path.join(DATA_DIR, "training_data")
TRAIN_DATA_DIR_WI = os.path.join(TRAIN_DATA_DIR, "word_2_index")
TRAIN_DATA_DIR_TF_IDF = os.path.join(TRAIN_DATA_DIR, "tf_idf")
VAL_DATA_DIR = os.path.join(DATA_DIR, "validation_data")
VAL_DATA_DIR_WI = os.path.join(VAL_DATA_DIR, "word_2_index")
VAL_DATA_DIR_TF_IDF = os.path.join(VAL_DATA_DIR, "tf_idf")
SPACY_MEDIUM_MODEL = "en_core_web_md"
SPACY_LARGE_MODEL = "en_core_web_lg"
TF_HUB_EMBEDDING_MODELS = [
"https://tfhub.dev/google/nnlm-en-dim128/2",
"https://tfhub.dev/google/universal-sentence-encoder/4",
"https://tfhub.dev/google/tf2-preview/nnlm-en-dim50/1",
]
LOG_FORMAT = (
"%(asctime)s | %(levelname)s | %(name)s | %(filename)s | %(lineno)d | %(message)s"
)
LOG_LEVEL = "DEBUG"
LOG_FILE = os.path.join(LOGS_DIR, "sentiment_analysis.log")
LOG_FILE_MAX_BYTES = 1048576
LOG_FILE_BACKUP_COUNT = 2
| 34.297872 | 86 | 0.759926 | [
"MIT"
] | pk0912/TweetEmotionsPredictor | settings.py | 1,612 | Python |
def dif(x, y):
q = 0
for i in range(len(x)):
if x[i] != y[i]: q += 1
return q
e = str(input())
n = int(input())
v = []
for i in range(5): v.append(dif(e, str(input())))
if min(v) > n: print(-1)
else:
print(v.index(min(v))+1)
print(min(v))
| 17.866667 | 49 | 0.492537 | [
"MIT"
] | heltonr13/URI | 2017.py | 268 | Python |
workers = 1  # number of worker processes handling requests; adjust to the site's traffic
worker_class = "gevent"  # use gevent workers for asynchronous request handling and higher throughput
# bind = "0.0.0.0:80"
bind = "0.0.0.0:80"
| 28 | 52 | 0.671429 | [
"MIT"
] | ShiZhuming/StyleTransfer | gunicorn.conf.py | 230 | Python |
n = int(input())
k = int(input())
total = n
for i in range(k):
total += int(str(n) + ('0' * (i+1)))
print(total) | 16.428571 | 38 | 0.530435 | [
"MIT"
] | osa-computer-society/competitive-programming | ccc/2017/ccc17j2.py | 115 | Python |
from toee import *
import char_class_utils
import char_editor
###################################################
def GetConditionName(): # used by API
return "Sorcerer"
# def GetSpellCasterConditionName():
# return "Sorcerer Spellcasting"
def GetCategory():
return "Core 3.5 Ed Classes"
def GetClassDefinitionFlags():
return CDF_BaseClass | CDF_CoreClass
def GetClassHelpTopic():
return "TAG_SORCERERS"
classEnum = stat_level_sorcerer
###################################################
class_feats = {
1: (feat_simple_weapon_proficiency, feat_call_familiar)
}
class_skills = (skill_alchemy, skill_bluff, skill_concentration, skill_craft, skill_knowledge_arcana, skill_profession, skill_spellcraft)
spells_per_day = {
1: (5, 3),
2: (6, 4),
3: (6, 5),
4: (6, 6, 3),
5: (6, 6, 4),
6: (6, 6, 5, 3),
7: (6, 6, 6, 4),
8: (6, 6, 6, 5, 3),
9: (6, 6, 6, 6, 4),
10: (6, 6, 6, 6, 5, 3),
11: (6, 6, 6, 6, 6, 4),
12: (6, 6, 6, 6, 6, 5, 3),
13: (6, 6, 6, 6, 6, 6, 4),
14: (6, 6, 6, 6, 6, 6, 5, 3),
15: (6, 6, 6, 6, 6, 6, 6, 4),
16: (6, 6, 6, 6, 6, 6, 6, 5, 3),
17: (6, 6, 6, 6, 6, 6, 6, 6, 4),
18: (6, 6, 6, 6, 6, 6, 6, 6, 5, 3),
19: (6, 6, 6, 6, 6, 6, 6, 6, 6, 4),
20: (6, 6, 6, 6, 6, 6, 6, 6, 6, 6)
#lvl 0 1 2 3 4 5 6 7 8 9
}
spells_known = {
1: (4, 2),
2: (5, 2),
3: (5, 3),
4: (6, 3, 1),
5: (6, 4, 2),
6: (7, 4, 2, 1),
7: (7, 5, 3, 2),
8: (8, 5, 3, 2, 1),
9: (8, 5, 4, 3, 2),
10: (9, 5, 4, 3, 2, 1),
11: (9, 5, 5, 4, 3, 2),
12: (9, 5, 5, 4, 3, 2, 1),
13: (9, 5, 5, 4, 4, 3, 2),
14: (9, 5, 5, 4, 4, 3, 2, 1),
15: (9, 5, 5, 4, 4, 4, 3, 2),
16: (9, 5, 5, 4, 4, 4, 3, 2, 1),
17: (9, 5, 5, 4, 4, 4, 3, 3, 2),
18: (9, 5, 5, 4, 4, 4, 3, 3, 2, 1),
19: (9, 5, 5, 4, 4, 4, 3, 3, 3, 2),
20: (9, 5, 5, 4, 4, 4, 3, 3, 3, 3)
#lvl 0 1 2 3 4 5 6 7 8 9
}
def GetHitDieType():
return 4
def GetSkillPtsPerLevel():
return 2
def GetBabProgression():
return base_attack_bonus_type_non_martial
def IsFortSaveFavored():
return 0
def IsRefSaveFavored():
return 0
def IsWillSaveFavored():
return 1
# Spell casting
def GetSpellListType():
return spell_list_type_arcane
def GetSpellSourceType():
return spell_source_type_arcane
def GetSpellReadyingType():
return spell_readying_innate
def GetSpellsPerDay():
return spells_per_day
caster_levels = range(1, 21)
def GetCasterLevels():
return caster_levels
def GetSpellDeterminingStat():
return stat_charisma
def IsClassSkill(skillEnum):
return char_class_utils.IsClassSkill(class_skills, skillEnum)
def IsClassFeat(featEnum):
return char_class_utils.IsClassFeat(class_feats, featEnum)
def GetClassFeats():
return class_feats
def IsAlignmentCompatible( alignment):
return 1
def ObjMeetsPrereqs( obj ):
abScore = obj.stat_base_get(stat_charisma)
if abScore > 10:
return 1
return 0
## Levelup callbacks
def IsSelectingSpellsOnLevelup( obj ):
return 1
def InitSpellSelection( obj, classLvlNew = -1, classLvlIncrement = 1):
classLvl = obj.stat_level_get(classEnum)
if classLvlNew <= 0:
classLvlNew = classLvl + 1
maxSpellLvl = char_editor.get_max_spell_level( obj, classEnum, classLvlNew ) # this regards spell list extension by stuff like Mystic Theurge
# Available Spells
spAvail = char_editor.get_learnable_spells(obj, classEnum, maxSpellLvl)
# add spell level labels
for p in range(0,maxSpellLvl+1):
spAvail.append(char_editor.KnownSpellInfo(spell_label_level_0 + p, 0, classEnum))
spAvail.sort()
char_editor.append_available_spells(spAvail)
# newly taken class
if classLvlNew == 1:
spEnums = []
spEnums.append(char_editor.KnownSpellInfo(spell_label_level_0, 0, classEnum)) # add "Level 0" label
for p in range(0,4): # 4 cantrips
spEnums.append(char_editor.KnownSpellInfo(spell_new_slot_lvl_0, 3, classEnum))
spEnums.append(char_editor.KnownSpellInfo(spell_label_level_1, 0, classEnum)) # add "Level 1" label
for p in range(0,2): # 2 level 1 spells
spEnums.append(char_editor.KnownSpellInfo(spell_new_slot_lvl_1, 3, classEnum))
char_editor.append_spell_enums(spEnums)
return 0
# Incrementing class level
spellListLvl = obj.stat_level_get(stat_spell_list_level, classEnum) + classLvlIncrement # the effective level for getting the number of spells known
spEnums = char_editor.get_known_class_spells(obj, classEnum) # get all spells known for this class
for spellLvl in range(0, maxSpellLvl+1):
spEnums.append(char_editor.KnownSpellInfo(spell_label_level_0 + spellLvl, 0, classEnum)) # add label
# add spells
newSpellsKnownCount = char_class_utils.GetSpellsKnownAddedCount( spells_known , spellListLvl, spellLvl)
print "new num spells for spell level " + str(spellLvl) + ": " + str(newSpellsKnownCount)
for q in range(0, newSpellsKnownCount):
spEnums.append(char_editor.KnownSpellInfo(spell_new_slot_lvl_0 + spellLvl, 3, classEnum))
isReplacing = 0
if spellListLvl >= 4 and (spellListLvl % 2) == 0: # spell replacement
isReplacing = 1
if char_editor.get_class_code() != classEnum: #grant this benefit only for strict levelup (also to prevent some headache...)
isReplacing = 0
if isReplacing == 0:
spEnums.sort()
char_editor.append_spell_enums(spEnums)
return 0
# mark as replaceable
for p in range(0,len(spEnums)):
spEnum = spEnums[p].spell_enum
if spell_vacant <= spEnum <= spell_label_level_9:
continue
if spell_new_slot_lvl_0 <= spEnum <= spell_new_slot_lvl_9:
continue
if char_editor.get_spell_level(spEnum, classEnum) <= maxSpellLvl-2:
spEnums[p].spell_status = 1 # marked as replaceable
spEnums.sort()
char_editor.append_spell_enums(spEnums)
return 0
def LevelupCheckSpells( obj ):
classLvl = obj.stat_level_get(classEnum)
classLvlNew = classLvl + 1
maxSpellLvl = char_editor.get_max_spell_level( obj, classEnum, classLvlNew )
spell_enums = char_editor.get_spell_enums()
for spInfo in spell_enums:
if spInfo.spell_enum == spell_vacant:
if maxSpellLvl >= 4 and spInfo.spell_level == 0: # in case the cantrips are causing problems
continue
return 0
return 1
def LevelupSpellsFinalize( obj, classLvlNew = -1 ):
spEnums = char_editor.get_spell_enums()
char_editor.spell_known_add(spEnums) # internally takes care of duplicates and the labels/vacant slots
return | 28.319635 | 149 | 0.688165 | [
"MIT"
] | Psionics-ToEE/TemplePlus | tpdatasrc/tpgamefiles/rules/char_class/class016_sorcerer.py | 6,202 | Python |
from pathlib import Path
from typing import Dict
import click
from hddcoin.util.config import load_config, save_config, str2bool
from hddcoin.util.default_root import DEFAULT_ROOT_PATH
def configure(
root_path: Path,
set_farmer_peer: str,
set_node_introducer: str,
set_fullnode_port: str,
set_log_level: str,
enable_upnp: str,
set_outbound_peer_count: str,
set_peer_count: str,
testnet: str,
):
config: Dict = load_config(DEFAULT_ROOT_PATH, "config.yaml")
change_made = False
if set_node_introducer:
try:
if set_node_introducer.index(":"):
host, port = (
":".join(set_node_introducer.split(":")[:-1]),
set_node_introducer.split(":")[-1],
)
config["full_node"]["introducer_peer"]["host"] = host
config["full_node"]["introducer_peer"]["port"] = int(port)
config["introducer"]["port"] = int(port)
print("Node introducer updated")
change_made = True
except ValueError:
print("Node introducer address must be in format [IP:Port]")
if set_farmer_peer:
try:
if set_farmer_peer.index(":"):
host, port = (
":".join(set_farmer_peer.split(":")[:-1]),
set_farmer_peer.split(":")[-1],
)
config["full_node"]["farmer_peer"]["host"] = host
config["full_node"]["farmer_peer"]["port"] = int(port)
config["harvester"]["farmer_peer"]["host"] = host
config["harvester"]["farmer_peer"]["port"] = int(port)
print("Farmer peer updated, make sure your harvester has the proper cert installed")
change_made = True
except ValueError:
print("Farmer address must be in format [IP:Port]")
if set_fullnode_port:
config["full_node"]["port"] = int(set_fullnode_port)
config["full_node"]["introducer_peer"]["port"] = int(set_fullnode_port)
config["farmer"]["full_node_peer"]["port"] = int(set_fullnode_port)
config["timelord"]["full_node_peer"]["port"] = int(set_fullnode_port)
config["wallet"]["full_node_peer"]["port"] = int(set_fullnode_port)
config["wallet"]["introducer_peer"]["port"] = int(set_fullnode_port)
config["introducer"]["port"] = int(set_fullnode_port)
print("Default full node port updated")
change_made = True
if set_log_level:
levels = ["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"]
if set_log_level in levels:
config["logging"]["log_level"] = set_log_level
print(f"Logging level updated. Check {DEFAULT_ROOT_PATH}/log/debug.log")
change_made = True
else:
print(f"Logging level not updated. Use one of: {levels}")
if enable_upnp is not None:
config["full_node"]["enable_upnp"] = str2bool(enable_upnp)
if str2bool(enable_upnp):
print("uPnP enabled")
else:
print("uPnP disabled")
change_made = True
if set_outbound_peer_count is not None:
config["full_node"]["target_outbound_peer_count"] = int(set_outbound_peer_count)
print("Target outbound peer count updated")
change_made = True
if set_peer_count is not None:
config["full_node"]["target_peer_count"] = int(set_peer_count)
print("Target peer count updated")
change_made = True
if testnet is not None:
if testnet == "true" or testnet == "t":
print("Setting Testnet")
testnet_port = "58444"
testnet_introducer = "beta1_introducer.hddcoin.org"
testnet = "testnet7"
config["full_node"]["port"] = int(testnet_port)
config["full_node"]["introducer_peer"]["port"] = int(testnet_port)
config["farmer"]["full_node_peer"]["port"] = int(testnet_port)
config["timelord"]["full_node_peer"]["port"] = int(testnet_port)
config["wallet"]["full_node_peer"]["port"] = int(testnet_port)
config["wallet"]["introducer_peer"]["port"] = int(testnet_port)
config["introducer"]["port"] = int(testnet_port)
config["full_node"]["introducer_peer"]["host"] = testnet_introducer
config["selected_network"] = testnet
config["harvester"]["selected_network"] = testnet
config["pool"]["selected_network"] = testnet
config["farmer"]["selected_network"] = testnet
config["timelord"]["selected_network"] = testnet
config["full_node"]["selected_network"] = testnet
config["ui"]["selected_network"] = testnet
config["introducer"]["selected_network"] = testnet
config["wallet"]["selected_network"] = testnet
print("Default full node port, introducer and network setting updated")
change_made = True
elif testnet == "false" or testnet == "f":
print("Setting Mainnet")
mainnet_port = "8444"
mainnet_introducer = "introducer.hddcoin.org"
net = "mainnet"
config["full_node"]["port"] = int(mainnet_port)
config["full_node"]["introducer_peer"]["port"] = int(mainnet_port)
config["farmer"]["full_node_peer"]["port"] = int(mainnet_port)
config["timelord"]["full_node_peer"]["port"] = int(mainnet_port)
config["wallet"]["full_node_peer"]["port"] = int(mainnet_port)
config["wallet"]["introducer_peer"]["port"] = int(mainnet_port)
config["introducer"]["port"] = int(mainnet_port)
config["full_node"]["introducer_peer"]["host"] = mainnet_introducer
config["selected_network"] = net
config["harvester"]["selected_network"] = net
config["pool"]["selected_network"] = net
config["farmer"]["selected_network"] = net
config["timelord"]["selected_network"] = net
config["full_node"]["selected_network"] = net
config["ui"]["selected_network"] = net
config["introducer"]["selected_network"] = net
config["wallet"]["selected_network"] = net
print("Default full node port, introducer and network setting updated")
change_made = True
else:
print("Please choose True or False")
if change_made:
print("Restart any running hddcoin services for changes to take effect")
save_config(root_path, "config.yaml", config)
return 0
@click.command("configure", short_help="Modify configuration")
@click.option(
"--testnet",
"-t",
help="configures for connection to testnet",
type=click.Choice(["true", "t", "false", "f"]),
)
@click.option("--set-node-introducer", help="Set the introducer for node - IP:Port", type=str)
@click.option("--set-farmer-peer", help="Set the farmer peer for harvester - IP:Port", type=str)
@click.option(
"--set-fullnode-port",
help="Set the port to use for the fullnode, useful for testing",
type=str,
)
@click.option(
"--set-log-level",
"--log-level",
"-log-level",
help="Set the instance log level",
type=click.Choice(["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG", "NOTSET"]),
)
@click.option(
"--enable-upnp",
"--upnp",
"-upnp",
help="Enable or disable uPnP",
type=click.Choice(["true", "t", "false", "f"]),
)
@click.option(
"--set_outbound-peer-count",
help="Update the target outbound peer count (default 8)",
type=str,
)
@click.option("--set-peer-count", help="Update the target peer count (default 80)", type=str)
@click.pass_context
def configure_cmd(
ctx,
set_farmer_peer,
set_node_introducer,
set_fullnode_port,
set_log_level,
enable_upnp,
set_outbound_peer_count,
set_peer_count,
testnet,
):
configure(
ctx.obj["root_path"],
set_farmer_peer,
set_node_introducer,
set_fullnode_port,
set_log_level,
enable_upnp,
set_outbound_peer_count,
set_peer_count,
testnet,
)
| 40.895522 | 100 | 0.60292 | [
"Apache-2.0"
] | grayfallstown/hddcoin-blockchain | hddcoin/cmds/configure.py | 8,220 | Python |
# Third Party
import mxnet as mx
from mxnet.ndarray import NDArray
# First Party
from smdebug.core.collection import DEFAULT_MXNET_COLLECTIONS, CollectionKeys
from smdebug.core.hook import CallbackHook
from smdebug.core.json_config import DEFAULT_WORKER_NAME
from smdebug.core.utils import FRAMEWORK, error_handling_agent
from smdebug.mxnet.collection import CollectionManager
from smdebug.mxnet.graph import _net2pb
from smdebug.mxnet.singleton_utils import set_hook
from smdebug.mxnet.utils import get_reduction_of_data, make_numpy_array
from smdebug.profiler.profiler_config_parser import get_profiler_config_parser
DEFAULT_INCLUDE_COLLECTIONS = [CollectionKeys.LOSSES]
COLLECTIONS_NOT_REQUIRING_RECURSIVE_HOOK = [
CollectionKeys.WEIGHTS,
CollectionKeys.BIASES,
CollectionKeys.GRADIENTS,
CollectionKeys.LOSSES,
]
profiler_config_parser = get_profiler_config_parser(FRAMEWORK.PYTORCH)
class Hook(CallbackHook):
def __init__(
self,
out_dir=None,
export_tensorboard=False,
tensorboard_dir=None,
dry_run=False,
reduction_config=None,
save_config=None,
include_regex=None,
include_collections=None,
save_all=False,
include_workers="one",
):
collection_manager = CollectionManager()
super().__init__(
collection_manager=collection_manager,
default_include_collections=DEFAULT_INCLUDE_COLLECTIONS,
profiler_config_parser=profiler_config_parser,
data_type_name=mx.ndarray.NDArray.__name__,
out_dir=out_dir,
export_tensorboard=export_tensorboard,
tensorboard_dir=tensorboard_dir,
dry_run=dry_run,
reduction_config=reduction_config,
save_config=save_config,
include_regex=include_regex,
include_collections=include_collections,
save_all=save_all,
include_workers=include_workers,
)
self.last_block = None
self.model = None
self.exported_model = False
# Keep the set of blocks to which this hook is registered. The blocks include loss blocks as well.
self.registered_blocks = set()
self.worker = self._get_worker_name()
set_hook(self)
def _get_worker_name(self):
try:
import horovod.mxnet as hvd
if hvd.size():
return f"worker_{hvd.rank()}"
except (ModuleNotFoundError, ValueError, ImportError):
pass
return DEFAULT_WORKER_NAME
def _get_num_workers(self):
try:
import horovod.mxnet as hvd
if hvd.size():
return hvd.size()
except (ModuleNotFoundError, ValueError, ImportError):
pass
return 1
def _cleanup(self):
# Write the gradients of the past step if the writer is still available.
if self.writer is not None and self.last_block is not None:
self._log_params(self.last_block)
if self.exported_model is False:
self._export_model()
super()._cleanup()
def _log_params(self, block):
params = block.collect_params().values()
for param in params:
self._log_param(param)
def _log_param(self, param):
try:
self._save_for_tensor(
tensor_name=param.name, tensor_value=param.data(param.list_ctx()[0])
)
# If Gradient for this param is available
if param.grad_req != "null":
self._save_for_tensor(
tensor_name=self.GRADIENT_PREFIX + param.name,
tensor_value=param.grad(param.list_ctx()[0]),
)
except RuntimeError as e:
self.logger.warning(
f"Could not log parameter {param.name} due to the mxnet exception: {e}"
)
def _export_model(self):
if self.model is not None:
try:
tb_writer = self._maybe_get_tb_writer()
if tb_writer:
tb_writer.write_graph(_net2pb(self.model))
except (RuntimeError, TypeError) as e:
self.logger.warning(
f"Could not export model graph for tensorboard "
f"due to the mxnet exception: {e}"
)
def _get_default_collections(self):
return DEFAULT_MXNET_COLLECTIONS
# This hook is invoked by trainer prior to running the forward pass.
@error_handling_agent.catch_smdebug_errors()
def forward_pre_hook(self, block, inputs):
if self.writer is not None:
# Write the params and gradients of the
# past step if the writer is still available.
self._log_params(block)
self._close_writers()
self._close_tb_writer()
if not self.prepared_collections:
# at this point we need all collections to be ready
# this may not be the case at creation of hook
# as user's code after hook might add collections
self._prepare_collections()
self.prepared_collections = True
self._increment_step()
if self._get_collections_to_save_for_step():
self._initialize_writers()
if self.exported_model is False:
self._export_model()
self.exported_model = True
if self.last_saved_step is not None and not self.exported_collections:
self.export_collections()
self.exported_collections = True
self.last_block = block
self._save_custom_tensors_post_step()
# This hook is invoked by trainer after running the forward pass.
@error_handling_agent.catch_smdebug_errors()
def forward_hook(self, block, inputs, outputs):
if not self._get_collections_to_save_for_step():
return
block_name = block.name
# This overwhelms the logs; turn back on if you really need it
# logger.debug("Processing the global step {0} for block {1}".format(self.step, block_name))
# Output input tensor
self._write_inputs(block_name, inputs)
# Output output tensors
self._write_outputs(block_name, outputs)
self.last_saved_step = self.step
def _recursive_apply(self, block):
"""
        This function is "applied" to every child in the block. It registers the
        forward hook on each child block so that the block's input and output
        tensors can be logged.
"""
# Check if the hook is already registered for this block.
if block in self.registered_blocks:
self.logger.warning(f"The hook is already registered to block {block.name}")
return
block.register_forward_hook(self.forward_hook)
self.registered_blocks.add(block)
def _is_recursive_needed(self):
collections_to_save = self.include_collections
# Check if default collection has a regex associated with it.
# If it does we would need to apply hook recursively.
if (
len(self.collection_manager.get(CollectionKeys.DEFAULT).include_regex) != 0
and CollectionKeys.DEFAULT in collections_to_save
):
return True
# Get the collections that are to be saved but are not part of default collections
# We will need to apply hook recursively to get tensors specified in those collections.
extra_coll = [
value
for value in collections_to_save
if value not in COLLECTIONS_NOT_REQUIRING_RECURSIVE_HOOK
]
# extra_coll contains the collections that are not part of default collections.
return len(extra_coll) != 0
def register_hook(self, block):
# for compatibility with ZCC patches which call this
self.register_block(block)
@error_handling_agent.catch_smdebug_errors()
def register_block(self, block):
"""
        This function registers the forward hook. If the user wants to register the hook
        for every child in the given block, the function calls the "apply" API to do the
        registration.
        The hook is registered recursively when the requested collections go beyond the
        default ones, i.e. gradients, weights and biases.
"""
if not isinstance(block, mx.gluon.Block):
self.logger.error(f"The given block type {block.__class__.__name__} is unsupported.")
return
# Check if the hook is already registered for this block.
if block in self.registered_blocks:
self.logger.warning(f"The hook is already registered to block {block.name}")
return
# Skip the forward pre hook for the Loss blocks.
if isinstance(block, mx.gluon.loss.Loss):
self.logger.info(f"Registering hook for block {block.name}")
block.register_forward_hook(self.forward_hook)
self.registered_blocks.add(block)
return
else:
self.model = block
is_recursive = self._is_recursive_needed()
block.register_forward_pre_hook(self.forward_pre_hook)
if is_recursive is True:
block.apply(self._recursive_apply)
else:
block.register_forward_hook(self.forward_hook)
self.registered_blocks.add(block)
@staticmethod
def _get_reduction_of_data(reduction_name, tensor_value, tensor_name, abs):
return get_reduction_of_data(reduction_name, tensor_value, tensor_name, abs)
@staticmethod
def _make_numpy_array(tensor_value):
if isinstance(tensor_value, NDArray):
return tensor_value.asnumpy()
return make_numpy_array(tensor_value)
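# A minimal registration sketch (not part of the original module); the output directory
# is a placeholder and `net` stands for any Gluon block.
def _demo_register(net):
    hook = Hook(out_dir="/tmp/smdebug_demo", save_all=False)
    hook.register_block(net)  # registers forward (pre-)hooks, recursively if needed
    return hook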
| 36.628253 | 106 | 0.653405 | [
"Apache-2.0"
] | arjkesh/sagemaker-debugger | smdebug/mxnet/hook.py | 9,853 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .application_base import ApplicationBase
class ApplicationUpdateParameters(ApplicationBase):
"""Request parameters for updating a new application.
:param allow_guests_sign_in: A property on the application to indicate if
the application accepts other IDPs or not or partially accepts.
:type allow_guests_sign_in: bool
:param allow_passthrough_users: Indicates that the application supports
pass through users who have no presence in the resource tenant.
:type allow_passthrough_users: bool
:param app_logo_url: The url for the application logo image stored in a
CDN.
:type app_logo_url: str
:param app_roles: The collection of application roles that an application
may declare. These roles can be assigned to users, groups or service
principals.
:type app_roles: list[~azure.graphrbac.models.AppRole]
:param app_permissions: The application permissions.
:type app_permissions: list[str]
:param available_to_other_tenants: Whether the application is available to
other tenants.
:type available_to_other_tenants: bool
:param error_url: A URL provided by the author of the application to
report errors when using the application.
:type error_url: str
:param group_membership_claims: Configures the groups claim issued in a
user or OAuth 2.0 access token that the app expects. Possible values
include: 'None', 'SecurityGroup', 'All'
:type group_membership_claims: str or
~azure.graphrbac.models.GroupMembershipClaimTypes
:param homepage: The home page of the application.
:type homepage: str
:param informational_urls: URLs with more information about the
application.
:type informational_urls: ~azure.graphrbac.models.InformationalUrl
:param is_device_only_auth_supported: Specifies whether this application
supports device authentication without a user. The default is false.
:type is_device_only_auth_supported: bool
:param key_credentials: A collection of KeyCredential objects.
:type key_credentials: list[~azure.graphrbac.models.KeyCredential]
:param known_client_applications: Client applications that are tied to
this resource application. Consent to any of the known client applications
will result in implicit consent to the resource application through a
combined consent dialog (showing the OAuth permission scopes required by
the client and the resource).
:type known_client_applications: list[str]
:param logout_url: the url of the logout page
:type logout_url: str
:param oauth2_allow_implicit_flow: Whether to allow implicit grant flow
for OAuth2
:type oauth2_allow_implicit_flow: bool
:param oauth2_allow_url_path_matching: Specifies whether during a token
Request Azure AD will allow path matching of the redirect URI against the
applications collection of replyURLs. The default is false.
:type oauth2_allow_url_path_matching: bool
:param oauth2_permissions: The collection of OAuth 2.0 permission scopes
that the web API (resource) application exposes to client applications.
These permission scopes may be granted to client applications during
consent.
:type oauth2_permissions: list[~azure.graphrbac.models.OAuth2Permission]
:param oauth2_require_post_response: Specifies whether, as part of OAuth
2.0 token requests, Azure AD will allow POST requests, as opposed to GET
requests. The default is false, which specifies that only GET requests
will be allowed.
:type oauth2_require_post_response: bool
:param org_restrictions: A list of tenants allowed to access application.
:type org_restrictions: list[str]
:param optional_claims:
:type optional_claims: ~azure.graphrbac.models.OptionalClaims
:param password_credentials: A collection of PasswordCredential objects
:type password_credentials:
list[~azure.graphrbac.models.PasswordCredential]
:param pre_authorized_applications: list of pre-authorized applications.
:type pre_authorized_applications:
list[~azure.graphrbac.models.PreAuthorizedApplication]
:param public_client: Specifies whether this application is a public
client (such as an installed application running on a mobile device).
Default is false.
:type public_client: bool
:param publisher_domain: Reliable domain which can be used to identify an
application.
:type publisher_domain: str
:param reply_urls: A collection of reply URLs for the application.
:type reply_urls: list[str]
:param required_resource_access: Specifies resources that this application
requires access to and the set of OAuth permission scopes and application
roles that it needs under each of those resources. This pre-configuration
of required resource access drives the consent experience.
:type required_resource_access:
list[~azure.graphrbac.models.RequiredResourceAccess]
:param saml_metadata_url: The URL to the SAML metadata for the
application.
:type saml_metadata_url: str
:param sign_in_audience: Audience for signing in to the application
(AzureADMyOrganization, AzureADAllOrganizations,
AzureADAndMicrosoftAccounts).
:type sign_in_audience: str
:param www_homepage: The primary Web page.
:type www_homepage: str
:param display_name: The display name of the application.
:type display_name: str
:param identifier_uris: A collection of URIs for the application.
:type identifier_uris: list[str]
"""
_attribute_map = {
'allow_guests_sign_in': {'key': 'allowGuestsSignIn', 'type': 'bool'},
'allow_passthrough_users': {'key': 'allowPassthroughUsers', 'type': 'bool'},
'app_logo_url': {'key': 'appLogoUrl', 'type': 'str'},
'app_roles': {'key': 'appRoles', 'type': '[AppRole]'},
'app_permissions': {'key': 'appPermissions', 'type': '[str]'},
'available_to_other_tenants': {'key': 'availableToOtherTenants', 'type': 'bool'},
'error_url': {'key': 'errorUrl', 'type': 'str'},
'group_membership_claims': {'key': 'groupMembershipClaims', 'type': 'str'},
'homepage': {'key': 'homepage', 'type': 'str'},
'informational_urls': {'key': 'informationalUrls', 'type': 'InformationalUrl'},
'is_device_only_auth_supported': {'key': 'isDeviceOnlyAuthSupported', 'type': 'bool'},
'key_credentials': {'key': 'keyCredentials', 'type': '[KeyCredential]'},
'known_client_applications': {'key': 'knownClientApplications', 'type': '[str]'},
'logout_url': {'key': 'logoutUrl', 'type': 'str'},
'oauth2_allow_implicit_flow': {'key': 'oauth2AllowImplicitFlow', 'type': 'bool'},
'oauth2_allow_url_path_matching': {'key': 'oauth2AllowUrlPathMatching', 'type': 'bool'},
'oauth2_permissions': {'key': 'oauth2Permissions', 'type': '[OAuth2Permission]'},
'oauth2_require_post_response': {'key': 'oauth2RequirePostResponse', 'type': 'bool'},
'org_restrictions': {'key': 'orgRestrictions', 'type': '[str]'},
'optional_claims': {'key': 'optionalClaims', 'type': 'OptionalClaims'},
'password_credentials': {'key': 'passwordCredentials', 'type': '[PasswordCredential]'},
'pre_authorized_applications': {'key': 'preAuthorizedApplications', 'type': '[PreAuthorizedApplication]'},
'public_client': {'key': 'publicClient', 'type': 'bool'},
'publisher_domain': {'key': 'publisherDomain', 'type': 'str'},
'reply_urls': {'key': 'replyUrls', 'type': '[str]'},
'required_resource_access': {'key': 'requiredResourceAccess', 'type': '[RequiredResourceAccess]'},
'saml_metadata_url': {'key': 'samlMetadataUrl', 'type': 'str'},
'sign_in_audience': {'key': 'signInAudience', 'type': 'str'},
'www_homepage': {'key': 'wwwHomepage', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'identifier_uris': {'key': 'identifierUris', 'type': '[str]'},
}
def __init__(self, **kwargs):
super(ApplicationUpdateParameters, self).__init__(**kwargs)
self.display_name = kwargs.get('display_name', None)
self.identifier_uris = kwargs.get('identifier_uris', None)
| 55.656051 | 114 | 0.707027 | [
"MIT"
] | 16pierre/azure-sdk-for-python | sdk/graphrbac/azure-graphrbac/azure/graphrbac/models/application_update_parameters.py | 8,738 | Python |
def Coeff_Static_Friction(Mat_on_Mat):
# Read from CSV
pass
def Coeff_Kinetic_Friction(Mat_on_Mat):
pass | 14.875 | 39 | 0.739496 | [
"MIT"
] | ZenosParadox/grtoolkit | grtoolkit/Mechanics/Friction/__init__.py | 119 | Python |
## Creates 404 page
import pystache
import utils
def main(data):
html = pystache.render(data["templates"]["page"], {
"title": "Page not found",
"description": "Error 404: page not found",
## Since we don't know the depth of this page relative to the root,
## we have to assume the db directory is located in the root of this web resource
"navigation": utils.generateTopBarNavigation("/" + data["config"].get("Site", "DbPath")),
"name": "error",
"content": pystache.render(data["templates"]["not-found-page-contents"]),
## Since we don't know the depth of this page relative to the root,
## we have to assume the search page is located in the root of this web resource
"search": "/" + data["definitions"]["filenames"]["search"],
})
notFoundFile = utils.mkfile(
data["definitions"]["runtime"]["cwd"],
data["config"].get("Filesystem", "DestinationDirPath"),
data["definitions"]["filenames"]["notfound"],
)
notFoundFile.write(html)
notFoundFile.close()
| 40.851852 | 98 | 0.614687 | [
"BSD-3-Clause"
] | Lyrics/lyrics-website | website-generator.d/80-not-found-page.py | 1,103 | Python |
from kafka import KafkaProducer
from json import dumps as json_dumps, load as json_load
import time
class ProducerServer(KafkaProducer):
def __init__(self, input_file, topic, **kwargs):
super().__init__(**kwargs)
self.input_file = input_file
self.topic = topic
def generate_data(self):
with open(self.input_file) as f:
data = json_load(f)
for line in data:
message = self.dict_to_binary(line)
self.send(self.topic, message)
def dict_to_binary(self, json_dict):
return json_dumps(json_dict).encode('utf-8') | 29.428571 | 55 | 0.645631 | [
"MIT"
] | estarguars113/udacity-spark-project | producer_server.py | 618 | Python |
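A usage sketch for ProducerServer: extra keyword arguments pass straight through to KafkaProducer. The file path, topic name, and broker address are placeholders.
producer = ProducerServer(
    input_file="data/input.json",        # placeholder path to a JSON array of records
    topic="service-calls",               # placeholder topic name
    bootstrap_servers="localhost:9092",
    client_id="producer-example",
)
producer.generate_data()
producer.flush()   # ensure buffered messages are actually delivered
producer.close()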
import os
from subprocess import check_output
import plumbum
from plumbum.cmd import grep, fpm, ln, sort, find, virtualenv
import logging
log = logging.getLogger()
logging.basicConfig(level=logging.INFO)
ENV_PATH = os.getenv("ENV_PATH", "/usr/share/python3/pypi-server")
SRC_PATH = os.getenv("SRC_PATH", "/mnt")
pip = plumbum.local[os.path.join(ENV_PATH, 'bin', 'pip3')]
log.info("Creating virtualenv %r", ENV_PATH)
virtualenv['-p', 'python3', ENV_PATH] & plumbum.FG
log.info("Installing package %r", SRC_PATH)
pip['install', '--no-binary=:all:', '-U', "{}[postgres]".format(SRC_PATH)] & plumbum.FG
pip['install', '--no-binary=:all:', "{}[proxy]".format(SRC_PATH)] & plumbum.FG
pip['install', '--no-binary=:all:', "{}[mysql]".format(SRC_PATH)] & plumbum.FG
ln['-snf', os.path.join(ENV_PATH, "bin", "pypi-server"), "/usr/bin/pypi-server"] & plumbum.BG
version = (pip['show', 'pypi-server'] | grep['^Version']) & plumbum.BG
version.wait()
version = version.stdout.strip().replace("Version:", '').strip()
args = (
'-s', 'dir',
'-f', '-t', 'deb',
'--iteration', os.getenv('ITERATION', '0'),
'-n', 'pypi-server',
'--config-files', '/etc/pypi-server.conf',
'--deb-systemd', '/mnt/contrib/pypi-server.service',
'-v', version,
'-p', "/mnt/dist",
'-d', 'python3',
'-d', 'python3-distutils',
)
depends = check_output((
    r'find %s -iname "*.so" -exec ldd {} \; | '
'''awk '{print $1}' | '''
'sort -u | '
'xargs dpkg -S | '
'''awk '{print $1}' | '''
'sort -u | '
'''cut -d ':' -f1 | sort -u'''
) % ENV_PATH, shell=True).decode('utf-8').splitlines()
for depend in depends:
args += ('-d', depend)
args += (
'{0}/={0}/'.format(ENV_PATH),
'/usr/bin/pypi-server=/usr/bin/pypi-server',
'/mnt/contrib/pypi-server.conf.example=/etc/pypi-server.conf',
)
fpm[args] & plumbum.FG
| 29.380952 | 93 | 0.605619 | [
"MIT"
] | SrtKoolice/pypi-server | package/make-deb.py | 1,851 | Python |
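The packaging script leans on plumbum's lazy pipelines and its FG/BG execution modifiers; a small sketch of that idiom, independent of the fpm specifics above.
import plumbum
from plumbum.cmd import echo, sort
# The pipeline is only composed here; nothing runs until it is executed.
pipeline = echo["banana\napple\ncherry"] | sort
pipeline & plumbum.FG             # run attached to the console, as the script does
future = pipeline & plumbum.BG    # or run detached and collect the result later
future.wait()
print(future.stdout)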
# Copyright (c) 2010-2020 openpyxlzip
# package imports
from openpyxlzip.reader.excel import load_workbook
from openpyxlzip.xml.functions import tostring, fromstring
from openpyxlzip.styles import Border, Side, PatternFill, Color, Font, fills, borders, colors
from openpyxlzip.styles.differential import DifferentialStyle, DifferentialStyleList
from openpyxlzip.formatting.formatting import ConditionalFormattingList
from openpyxlzip.formatting.rule import CellIsRule, FormulaRule, Rule
# test imports
import pytest
from openpyxlzip.tests.helper import compare_xml
class DummyWorkbook():
def __init__(self):
self._differential_styles = DifferentialStyleList()
self.worksheets = []
class DummyWorksheet():
def __init__(self):
self.conditional_formatting = ConditionalFormattingList()
self.parent = DummyWorkbook()
def test_conditional_formatting_read(datadir):
datadir.chdir()
reference_file = 'conditional-formatting.xlsx'
wb = load_workbook(reference_file)
ws = wb.active
rules = ws.conditional_formatting
assert len(rules) == 30
# First test the conditional formatting rules read
rule = rules['A1:A1048576'][0]
assert dict(rule) == {'priority':'30', 'type': 'colorScale', }
rule = rules['B1:B10'][0]
assert dict(rule) == {'priority': '29', 'type': 'colorScale'}
rule = rules['C1:C10'][0]
assert dict(rule) == {'priority': '28', 'type': 'colorScale'}
rule = rules['D1:D10'][0]
assert dict(rule) == {'priority': '27', 'type': 'colorScale', }
rule = rules['E1:E10'][0]
assert dict(rule) == {'priority': '26', 'type': 'colorScale', }
rule = rules['F1:F10'][0]
assert dict(rule) == {'priority': '25', 'type': 'colorScale', }
rule = rules['G1:G10'][0]
assert dict(rule) == {'priority': '24', 'type': 'colorScale', }
rule = rules['H1:H10'][0]
assert dict(rule) == {'priority': '23', 'type': 'colorScale', }
rule = rules['I1:I10'][0]
assert dict(rule) == {'priority': '22', 'type': 'colorScale', }
rule = rules['J1:J10'][0]
assert dict(rule) == {'priority': '21', 'type': 'colorScale', }
rule = rules['K1:K10'][0]
assert dict(rule) == {'priority': '20', 'type': 'dataBar'}
rule = rules['L1:L10'][0]
assert dict(rule) == {'priority': '19', 'type': 'dataBar'}
rule = rules['M1:M10'][0]
assert dict(rule) == {'priority': '18', 'type': 'dataBar'}
rule = rules['N1:N10'][0]
assert dict(rule) == {'priority': '17', 'type': 'iconSet'}
rule = rules['O1:O10'][0]
assert dict(rule) == {'priority': '16', 'type': 'iconSet'}
rule = rules['P1:P10'][0]
assert dict(rule) == {'priority': '15', 'type': 'iconSet'}
rule = rules['Q1:Q10'][0]
assert dict(rule) == {'text': '3', 'priority': '14', 'dxfId': '27',
'operator': 'containsText', 'type': 'containsText'}
assert rule.dxf == DifferentialStyle(font=Font(color='FF9C0006'),
fill=PatternFill(bgColor='FFFFC7CE')
)
rule = rules['R1:R10'][0]
assert dict(rule) == {'operator': 'between', 'dxfId': '26', 'type':
'cellIs', 'priority': '13'}
assert rule.dxf == DifferentialStyle(font=Font(color='FF9C6500'),
fill=PatternFill(bgColor='FFFFEB9C'))
rule = rules['S1:S10'][0]
assert dict(rule) == {'priority': '12', 'dxfId': '25', 'percent': '1',
'type': 'top10', 'rank': '10'}
rule = rules['T1:T10'][0]
assert dict(rule) == {'priority': '11', 'dxfId': '24', 'type': 'top10',
'rank': '4', 'bottom': '1'}
rule = rules['U1:U10'][0]
assert dict(rule) == {'priority': '10', 'dxfId': '23', 'type':
'aboveAverage'}
rule = rules['V1:V10'][0]
assert dict(rule) == {'aboveAverage': '0', 'dxfId': '22', 'type':
'aboveAverage', 'priority': '9'}
rule = rules['W1:W10'][0]
assert dict(rule) == {'priority': '8', 'dxfId': '21', 'type':
'aboveAverage', 'equalAverage': '1'}
rule = rules['X1:X10'][0]
assert dict(rule) == {'aboveAverage': '0', 'dxfId': '20', 'priority': '7',
'type': 'aboveAverage', 'equalAverage': '1'}
rule = rules['Y1:Y10'][0]
assert dict(rule) == {'priority': '6', 'dxfId': '19', 'type':
'aboveAverage', 'stdDev': '1'}
rule = rules['Z1:Z10'][0]
assert dict(rule)== {'aboveAverage': '0', 'dxfId': '18', 'type':
'aboveAverage', 'stdDev': '1', 'priority': '5'}
assert rule.dxf == DifferentialStyle(font=Font(b=True, i=True, color='FF9C0006'),
fill=PatternFill(bgColor='FFFFC7CE'),
border=Border(
left=Side(style='thin', color=Color(theme=5)),
right=Side(style='thin', color=Color(theme=5)),
top=Side(style='thin', color=Color(theme=5)),
bottom=Side(style='thin', color=Color(theme=5))
)
)
rule = rules['AA1:AA10'][0]
assert dict(rule) == {'priority': '4', 'dxfId': '17', 'type':
'aboveAverage', 'stdDev': '2'}
rule = rules['AB1:AB10'][0]
assert dict(rule) == {'priority': '3', 'dxfId': '16', 'type':
'duplicateValues'}
rule = rules['AC1:AC10'][0]
assert dict(rule) == {'priority': '2', 'dxfId': '15', 'type':
'uniqueValues'}
rule = rules['AD1:AD10'][0]
assert dict(rule) == {'priority': '1', 'dxfId': '14', 'type': 'expression',}
@pytest.fixture
def ConditionalFormatting():
from ..formatting import ConditionalFormatting
return ConditionalFormatting
class TestConditionalFormatting:
def test_ctor(self, ConditionalFormatting):
cf = ConditionalFormatting(sqref="A1:B5")
xml = tostring(cf.to_tree())
expected = """
<conditionalFormatting sqref="A1:B5" />
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_tree(self, ConditionalFormatting):
src = """
<conditionalFormatting sqref="A1:B5" />
"""
tree = fromstring(src)
cf = ConditionalFormatting.from_tree(tree)
assert cf.sqref == "A1:B5"
def test_eq(self, ConditionalFormatting):
c1 = ConditionalFormatting("A1:B5")
c2 = ConditionalFormatting("A1:B5", pivot=True)
assert c1 == c2
def test_hash(self, ConditionalFormatting):
c1 = ConditionalFormatting("A1:B5")
assert hash(c1) == hash("A1:B5")
def test_repr(self, ConditionalFormatting):
c1 = ConditionalFormatting("A1:B5")
assert repr(c1) == "<ConditionalFormatting A1:B5>"
def test_contains(self, ConditionalFormatting):
c2 = ConditionalFormatting("A1:A5 B1:B5")
assert "B2" in c2
| 35.221675 | 93 | 0.554685 | [
"MIT"
] | ankitJoshi03/openpyxlzip | openpyxlzip/formatting/tests/test_formatting.py | 7,150 | Python |
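The tests above read conditional formatting back out of a workbook; the sketch below writes a rule with the same classes, assuming openpyxlzip keeps openpyxl's public API (as the imports in the test suggest).
from openpyxlzip import Workbook
from openpyxlzip.styles import Font, PatternFill
from openpyxlzip.formatting.rule import CellIsRule
wb = Workbook()
ws = wb.active
for row in range(1, 11):
    ws.cell(row=row, column=1, value=row)
red_fill = PatternFill(start_color="FFC7CE", end_color="FFC7CE", fill_type="solid")
rule = CellIsRule(operator="greaterThan", formula=["5"],
                  font=Font(color="9C0006"), fill=red_fill)
ws.conditional_formatting.add("A1:A10", rule)
wb.save("conditional-formatting-example.xlsx")  # output path is illustrative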
class PNChannelGroupsAddChannelResult(object):
pass
class PNChannelGroupsRemoveChannelResult(object):
pass
class PNChannelGroupsRemoveGroupResult(object):
pass
class PNChannelGroupsListResult(object):
def __init__(self, channels):
self.channels = channels
| 17.875 | 49 | 0.772727 | [
"MIT"
] | 17media/pubnub-python | pubnub/models/consumer/channel_group.py | 286 | Python |
import json
from time import sleep
from uuid import uuid4
from datetime import datetime
import logging
from kafka import KafkaProducer, KafkaConsumer
from settings import (
KAFKA_BOOTSTRAP_SERVER,
KAFKA_VALUE_ENCODING,
KAFKA_INBOUND_TOPIC,
KAFKA_SUCCESS_OUTBOUND_TOPIC,
KAFKA_ERROR_OUTBOUND_TOPIC,
KAFKA_DEAD_LETTER_QUEUE_TOPIC,
KAFKA_SUCCESS_ACKS,
KAFKA_ERROR_ACKS,
KAFKA_DEAD_LETTER_QUEUE_ACKS,
KAFKA_INBOUND_GROUP_ID,
KAFKA_INBOUND_AUTO_OFFSET_RESET,
EXECUTION_SLEEP,
EXECUTION_MESSAGE_FORCE_ERROR_KEY,
KAFKA_INBOUND_TIMEOUT,
KAFKA_INBOUND_MAX_RECORDS,
)
from schemas import ResultField
LOGGER = logging.getLogger(__name__)
class RequestsProcessorBuilder(object):
@staticmethod
def build():
return RequestsProcessor(
RequestsProcessorBuilder.build_inbound_consumer(),
RequestsProcessorBuilder.build_success_publisher(),
RequestsProcessorBuilder.build_error_publisher(),
RequestsProcessorBuilder.build_dead_letter_publisher(),
)
@staticmethod
def build_inbound_consumer():
return KafkaConsumer(
KAFKA_INBOUND_TOPIC,
bootstrap_servers=[KAFKA_BOOTSTRAP_SERVER],
auto_offset_reset=KAFKA_INBOUND_AUTO_OFFSET_RESET,
enable_auto_commit=False,
group_id=KAFKA_INBOUND_GROUP_ID,
value_deserializer=lambda value: json.loads(value.decode(KAFKA_VALUE_ENCODING))
)
@staticmethod
def build_success_publisher():
return RequestsProcessorBuilder.build_producer(KAFKA_SUCCESS_ACKS)
@staticmethod
def build_error_publisher():
return RequestsProcessorBuilder.build_producer(KAFKA_ERROR_ACKS)
@staticmethod
def build_dead_letter_publisher():
return RequestsProcessorBuilder.build_producer(KAFKA_DEAD_LETTER_QUEUE_ACKS)
@staticmethod
def build_producer(acknowledgements):
return KafkaProducer(
bootstrap_servers=[KAFKA_BOOTSTRAP_SERVER],
value_serializer=lambda value: json.dumps(value).encode(KAFKA_VALUE_ENCODING),
acks=acknowledgements
)
class RequestsProcessor(object):
def __init__(self, inbound_consumer, success_publisher, error_publisher, dead_letter_publisher):
self.inbound_consumer = inbound_consumer
self.success_publisher = success_publisher
self.error_publisher = error_publisher
self.dead_letter_publisher = dead_letter_publisher
def start(self):
while True:
messages_by_partition = self.inbound_consumer.poll(
timeout_ms=KAFKA_INBOUND_TIMEOUT,
max_records=KAFKA_INBOUND_MAX_RECORDS,
)
self.handle_messages(messages_by_partition)
def handle_messages(self, messages_by_partition):
for topic_partition, messages in messages_by_partition.items():
for message in messages:
self.handle_message(topic_partition, message)
def handle_message(self, topic_partition, message):
execution = message.value
LOGGER.info("Handling message: '%s'", str(execution))
try:
failed, outputs, start_time, end_time, total_seconds = RequestsProcessor.process(
execution
)
result = RequestsProcessor.build_result(
execution, outputs, start_time, end_time, total_seconds
)
self.publish_to_result_topic(result, failed)
        except Exception:  # don't let one bad message stop the consumer loop
LOGGER.exception("An error occurred while handling the execution")
self.publish_to_dead_letter_queue_topic(execution)
self.commit_current_message(topic_partition)
LOGGER.info("Done handling message: '%s'", str(execution))
def publish_to_result_topic(self, execution, failed):
if failed:
LOGGER.info("Publishing execution to failed executions topic")
self.error_publisher.send(KAFKA_ERROR_OUTBOUND_TOPIC, value=execution)
LOGGER.info("Published execution to failed executions topic")
else:
LOGGER.info("Publishing execution to successful executions topic")
self.success_publisher.send(KAFKA_SUCCESS_OUTBOUND_TOPIC, value=execution)
LOGGER.info("Published execution to successful executions topic")
def publish_to_dead_letter_queue_topic(self, execution):
LOGGER.info("Publishing execution to dead letter queue topic")
self.dead_letter_publisher.send(KAFKA_DEAD_LETTER_QUEUE_TOPIC, value=execution)
LOGGER.info("Published execution to dead letter queue topic")
def commit_current_message(self, topic_partition):
LOGGER.info("Committing")
self.inbound_consumer.commit()
new_offset = self.inbound_consumer.committed(topic_partition)
LOGGER.info("Committed. New Kafka offset: %s", new_offset)
@staticmethod
def process(execution):
LOGGER.info("Executing: %s", execution)
start_time = datetime.utcnow()
failed, outputs = Executor(execution).execute()
end_time = datetime.utcnow()
processing_time_difference = end_time - start_time
processing_time_seconds = processing_time_difference.total_seconds()
LOGGER.info("Executed: %s", execution)
return failed, outputs, start_time, end_time, processing_time_seconds
@staticmethod
def build_result(execution, outputs, start_time, end_time, total_seconds):
return {
ResultField.ID: generate_identifier(),
ResultField.START_TIME: str(start_time),
ResultField.END_TIME: str(end_time),
ResultField.TOTAL_SECONDS: total_seconds,
ResultField.EXECUTION: execution.copy(),
ResultField.OUTPUTS: outputs
}
class Executor(object):
def __init__(self, execution):
self.execution = execution
def execute(self):
Executor.wait(EXECUTION_SLEEP)
force_error = self.execution.get(EXECUTION_MESSAGE_FORCE_ERROR_KEY)
outputs = Executor.get_outputs(force_error)
return force_error, outputs
@staticmethod
def wait(seconds):
LOGGER.info("Sleeping for %d seconds...", seconds)
sleep(seconds)
LOGGER.info("Done waiting")
@staticmethod
def get_outputs(force_error):
outputs = {}
if not force_error:
outputs[ResultField.OUTPUT_MESSAGE_KEY] = ResultField.OUTPUT_MESSAGE_VALUE
return outputs
def generate_identifier():
return str(uuid4())
| 34.317708 | 100 | 0.700562 | [
"MIT"
] | gabrielbazan/sate | processor/processor/requests_processor.py | 6,589 | Python |
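A sketch of the intended entry point for the module above: the builder reads every broker address and topic from settings, so starting the consumer loop needs nothing else.
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    processor = RequestsProcessorBuilder.build()
    processor.start()   # blocks, polling the inbound topic in an endless loop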
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetVirtualMachineScaleSetResult',
'AwaitableGetVirtualMachineScaleSetResult',
'get_virtual_machine_scale_set',
]
@pulumi.output_type
class GetVirtualMachineScaleSetResult:
"""
Describes a Virtual Machine Scale Set.
"""
def __init__(__self__, additional_capabilities=None, automatic_repairs_policy=None, do_not_run_extensions_on_overprovisioned_vms=None, extended_location=None, host_group=None, id=None, identity=None, location=None, name=None, orchestration_mode=None, overprovision=None, plan=None, platform_fault_domain_count=None, provisioning_state=None, proximity_placement_group=None, scale_in_policy=None, single_placement_group=None, sku=None, tags=None, type=None, unique_id=None, upgrade_policy=None, virtual_machine_profile=None, zone_balance=None, zones=None):
if additional_capabilities and not isinstance(additional_capabilities, dict):
raise TypeError("Expected argument 'additional_capabilities' to be a dict")
pulumi.set(__self__, "additional_capabilities", additional_capabilities)
if automatic_repairs_policy and not isinstance(automatic_repairs_policy, dict):
raise TypeError("Expected argument 'automatic_repairs_policy' to be a dict")
pulumi.set(__self__, "automatic_repairs_policy", automatic_repairs_policy)
if do_not_run_extensions_on_overprovisioned_vms and not isinstance(do_not_run_extensions_on_overprovisioned_vms, bool):
raise TypeError("Expected argument 'do_not_run_extensions_on_overprovisioned_vms' to be a bool")
pulumi.set(__self__, "do_not_run_extensions_on_overprovisioned_vms", do_not_run_extensions_on_overprovisioned_vms)
if extended_location and not isinstance(extended_location, dict):
raise TypeError("Expected argument 'extended_location' to be a dict")
pulumi.set(__self__, "extended_location", extended_location)
if host_group and not isinstance(host_group, dict):
raise TypeError("Expected argument 'host_group' to be a dict")
pulumi.set(__self__, "host_group", host_group)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if orchestration_mode and not isinstance(orchestration_mode, str):
raise TypeError("Expected argument 'orchestration_mode' to be a str")
pulumi.set(__self__, "orchestration_mode", orchestration_mode)
if overprovision and not isinstance(overprovision, bool):
raise TypeError("Expected argument 'overprovision' to be a bool")
pulumi.set(__self__, "overprovision", overprovision)
if plan and not isinstance(plan, dict):
raise TypeError("Expected argument 'plan' to be a dict")
pulumi.set(__self__, "plan", plan)
if platform_fault_domain_count and not isinstance(platform_fault_domain_count, int):
raise TypeError("Expected argument 'platform_fault_domain_count' to be a int")
pulumi.set(__self__, "platform_fault_domain_count", platform_fault_domain_count)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if proximity_placement_group and not isinstance(proximity_placement_group, dict):
raise TypeError("Expected argument 'proximity_placement_group' to be a dict")
pulumi.set(__self__, "proximity_placement_group", proximity_placement_group)
if scale_in_policy and not isinstance(scale_in_policy, dict):
raise TypeError("Expected argument 'scale_in_policy' to be a dict")
pulumi.set(__self__, "scale_in_policy", scale_in_policy)
if single_placement_group and not isinstance(single_placement_group, bool):
raise TypeError("Expected argument 'single_placement_group' to be a bool")
pulumi.set(__self__, "single_placement_group", single_placement_group)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if unique_id and not isinstance(unique_id, str):
raise TypeError("Expected argument 'unique_id' to be a str")
pulumi.set(__self__, "unique_id", unique_id)
if upgrade_policy and not isinstance(upgrade_policy, dict):
raise TypeError("Expected argument 'upgrade_policy' to be a dict")
pulumi.set(__self__, "upgrade_policy", upgrade_policy)
if virtual_machine_profile and not isinstance(virtual_machine_profile, dict):
raise TypeError("Expected argument 'virtual_machine_profile' to be a dict")
pulumi.set(__self__, "virtual_machine_profile", virtual_machine_profile)
if zone_balance and not isinstance(zone_balance, bool):
raise TypeError("Expected argument 'zone_balance' to be a bool")
pulumi.set(__self__, "zone_balance", zone_balance)
if zones and not isinstance(zones, list):
raise TypeError("Expected argument 'zones' to be a list")
pulumi.set(__self__, "zones", zones)
@property
@pulumi.getter(name="additionalCapabilities")
def additional_capabilities(self) -> Optional['outputs.AdditionalCapabilitiesResponse']:
"""
Specifies additional capabilities enabled or disabled on the Virtual Machines in the Virtual Machine Scale Set. For instance: whether the Virtual Machines have the capability to support attaching managed data disks with UltraSSD_LRS storage account type.
"""
return pulumi.get(self, "additional_capabilities")
@property
@pulumi.getter(name="automaticRepairsPolicy")
def automatic_repairs_policy(self) -> Optional['outputs.AutomaticRepairsPolicyResponse']:
"""
Policy for automatic repairs.
"""
return pulumi.get(self, "automatic_repairs_policy")
@property
@pulumi.getter(name="doNotRunExtensionsOnOverprovisionedVMs")
def do_not_run_extensions_on_overprovisioned_vms(self) -> Optional[bool]:
"""
When Overprovision is enabled, extensions are launched only on the requested number of VMs which are finally kept. This property will hence ensure that the extensions do not run on the extra overprovisioned VMs.
"""
return pulumi.get(self, "do_not_run_extensions_on_overprovisioned_vms")
@property
@pulumi.getter(name="extendedLocation")
def extended_location(self) -> Optional['outputs.ExtendedLocationResponse']:
"""
The extended location of the Virtual Machine Scale Set.
"""
return pulumi.get(self, "extended_location")
@property
@pulumi.getter(name="hostGroup")
def host_group(self) -> Optional['outputs.SubResourceResponse']:
"""
Specifies information about the dedicated host group that the virtual machine scale set resides in. <br><br>Minimum api-version: 2020-06-01.
"""
return pulumi.get(self, "host_group")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.VirtualMachineScaleSetIdentityResponse']:
"""
The identity of the virtual machine scale set, if configured.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> str:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="orchestrationMode")
def orchestration_mode(self) -> Optional[str]:
"""
Specifies the orchestration mode for the virtual machine scale set.
"""
return pulumi.get(self, "orchestration_mode")
@property
@pulumi.getter
def overprovision(self) -> Optional[bool]:
"""
Specifies whether the Virtual Machine Scale Set should be overprovisioned.
"""
return pulumi.get(self, "overprovision")
@property
@pulumi.getter
def plan(self) -> Optional['outputs.PlanResponse']:
"""
Specifies information about the marketplace image used to create the virtual machine. This element is only used for marketplace images. Before you can use a marketplace image from an API, you must enable the image for programmatic use. In the Azure portal, find the marketplace image that you want to use and then click **Want to deploy programmatically, Get Started ->**. Enter any required information and then click **Save**.
"""
return pulumi.get(self, "plan")
@property
@pulumi.getter(name="platformFaultDomainCount")
def platform_fault_domain_count(self) -> Optional[int]:
"""
Fault Domain count for each placement group.
"""
return pulumi.get(self, "platform_fault_domain_count")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state, which only appears in the response.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="proximityPlacementGroup")
def proximity_placement_group(self) -> Optional['outputs.SubResourceResponse']:
"""
Specifies information about the proximity placement group that the virtual machine scale set should be assigned to. <br><br>Minimum api-version: 2018-04-01.
"""
return pulumi.get(self, "proximity_placement_group")
@property
@pulumi.getter(name="scaleInPolicy")
def scale_in_policy(self) -> Optional['outputs.ScaleInPolicyResponse']:
"""
Specifies the scale-in policy that decides which virtual machines are chosen for removal when a Virtual Machine Scale Set is scaled-in.
"""
return pulumi.get(self, "scale_in_policy")
@property
@pulumi.getter(name="singlePlacementGroup")
def single_placement_group(self) -> Optional[bool]:
"""
When true this limits the scale set to a single placement group, of max size 100 virtual machines. NOTE: If singlePlacementGroup is true, it may be modified to false. However, if singlePlacementGroup is false, it may not be modified to true.
"""
return pulumi.get(self, "single_placement_group")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.SkuResponse']:
"""
The virtual machine scale set sku.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="uniqueId")
def unique_id(self) -> str:
"""
Specifies the ID which uniquely identifies a Virtual Machine Scale Set.
"""
return pulumi.get(self, "unique_id")
@property
@pulumi.getter(name="upgradePolicy")
def upgrade_policy(self) -> Optional['outputs.UpgradePolicyResponse']:
"""
The upgrade policy.
"""
return pulumi.get(self, "upgrade_policy")
@property
@pulumi.getter(name="virtualMachineProfile")
def virtual_machine_profile(self) -> Optional['outputs.VirtualMachineScaleSetVMProfileResponse']:
"""
The virtual machine profile.
"""
return pulumi.get(self, "virtual_machine_profile")
@property
@pulumi.getter(name="zoneBalance")
def zone_balance(self) -> Optional[bool]:
"""
Whether to force strictly even Virtual Machine distribution cross x-zones in case there is zone outage.
"""
return pulumi.get(self, "zone_balance")
@property
@pulumi.getter
def zones(self) -> Optional[Sequence[str]]:
"""
The virtual machine scale set zones. NOTE: Availability zones can only be set when you create the scale set
"""
return pulumi.get(self, "zones")
class AwaitableGetVirtualMachineScaleSetResult(GetVirtualMachineScaleSetResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVirtualMachineScaleSetResult(
additional_capabilities=self.additional_capabilities,
automatic_repairs_policy=self.automatic_repairs_policy,
do_not_run_extensions_on_overprovisioned_vms=self.do_not_run_extensions_on_overprovisioned_vms,
extended_location=self.extended_location,
host_group=self.host_group,
id=self.id,
identity=self.identity,
location=self.location,
name=self.name,
orchestration_mode=self.orchestration_mode,
overprovision=self.overprovision,
plan=self.plan,
platform_fault_domain_count=self.platform_fault_domain_count,
provisioning_state=self.provisioning_state,
proximity_placement_group=self.proximity_placement_group,
scale_in_policy=self.scale_in_policy,
single_placement_group=self.single_placement_group,
sku=self.sku,
tags=self.tags,
type=self.type,
unique_id=self.unique_id,
upgrade_policy=self.upgrade_policy,
virtual_machine_profile=self.virtual_machine_profile,
zone_balance=self.zone_balance,
zones=self.zones)
def get_virtual_machine_scale_set(expand: Optional[str] = None,
resource_group_name: Optional[str] = None,
vm_scale_set_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVirtualMachineScaleSetResult:
"""
Describes a Virtual Machine Scale Set.
API Version: 2021-03-01.
:param str expand: The expand expression to apply on the operation. 'UserData' retrieves the UserData property of the VM scale set that was provided by the user during the VM scale set Create/Update operation
:param str resource_group_name: The name of the resource group.
:param str vm_scale_set_name: The name of the VM scale set.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['resourceGroupName'] = resource_group_name
__args__['vmScaleSetName'] = vm_scale_set_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:compute:getVirtualMachineScaleSet', __args__, opts=opts, typ=GetVirtualMachineScaleSetResult).value
return AwaitableGetVirtualMachineScaleSetResult(
additional_capabilities=__ret__.additional_capabilities,
automatic_repairs_policy=__ret__.automatic_repairs_policy,
do_not_run_extensions_on_overprovisioned_vms=__ret__.do_not_run_extensions_on_overprovisioned_vms,
extended_location=__ret__.extended_location,
host_group=__ret__.host_group,
id=__ret__.id,
identity=__ret__.identity,
location=__ret__.location,
name=__ret__.name,
orchestration_mode=__ret__.orchestration_mode,
overprovision=__ret__.overprovision,
plan=__ret__.plan,
platform_fault_domain_count=__ret__.platform_fault_domain_count,
provisioning_state=__ret__.provisioning_state,
proximity_placement_group=__ret__.proximity_placement_group,
scale_in_policy=__ret__.scale_in_policy,
single_placement_group=__ret__.single_placement_group,
sku=__ret__.sku,
tags=__ret__.tags,
type=__ret__.type,
unique_id=__ret__.unique_id,
upgrade_policy=__ret__.upgrade_policy,
virtual_machine_profile=__ret__.virtual_machine_profile,
zone_balance=__ret__.zone_balance,
zones=__ret__.zones)
| 45.819843 | 558 | 0.690182 | [
"Apache-2.0"
] | polivbr/pulumi-azure-native | sdk/python/pulumi_azure_native/compute/get_virtual_machine_scale_set.py | 17,549 | Python |
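A usage sketch for the invoke wrapper above, meant to run inside a Pulumi program; the resource group and scale-set names are placeholders.
import pulumi
import pulumi_azure_native as azure_native
vmss = azure_native.compute.get_virtual_machine_scale_set(
    resource_group_name="example-rg",     # placeholder
    vm_scale_set_name="example-vmss",     # placeholder
)
pulumi.export("vmssUniqueId", vmss.unique_id)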
print ("welcome to edureka!! ")
| 16 | 31 | 0.65625 | [
"MIT"
] | jatin06/learning-git | edureka.py | 32 | Python |
import pandas as pd
from pandas.testing import assert_frame_equal
import pytest
from unittest import mock
from nesta.packages.geo_utils.geocode import geocode
from nesta.packages.geo_utils.geocode import _geocode
from nesta.packages.geo_utils.geocode import geocode_dataframe
from nesta.packages.geo_utils.geocode import geocode_batch_dataframe
from nesta.packages.geo_utils.geocode import generate_composite_key
from nesta.packages.geo_utils.country_iso_code import country_iso_code
from nesta.packages.geo_utils.country_iso_code import country_iso_code_dataframe
from nesta.packages.geo_utils.country_iso_code import country_iso_code_to_name
from nesta.packages.geo_utils.lookup import get_continent_lookup
from nesta.packages.geo_utils.lookup import get_country_region_lookup
from nesta.packages.geo_utils.lookup import get_country_continent_lookup
REQUESTS = 'nesta.packages.geo_utils.geocode.requests.get'
PYCOUNTRY = 'nesta.packages.geo_utils.country_iso_code.pycountry.countries.get'
GEOCODE = 'nesta.packages.geo_utils.geocode.geocode'
_GEOCODE = 'nesta.packages.geo_utils.geocode._geocode'
COUNTRY_ISO_CODE = 'nesta.packages.geo_utils.country_iso_code.country_iso_code'
class TestGeocoding():
@staticmethod
@pytest.fixture
def mocked_osm_response():
mocked_response = mock.Mock()
mocked_response.json.return_value = [{'lat': '12.923432', 'lon': '-75.234569'}]
return mocked_response
def test_error_raised_when_arguments_missing(self):
with pytest.raises(ValueError) as e:
geocode()
assert "No geocode match" in str(e.value)
@mock.patch(REQUESTS)
def test_request_includes_user_agent_in_header(self, mocked_request, mocked_osm_response):
mocked_request.return_value = mocked_osm_response
geocode(something='a')
assert mocked_request.call_args[1]['headers'] == {'User-Agent': 'Nesta health data geocode'}
@mock.patch(REQUESTS)
def test_url_correct_with_city_and_country(self, mocked_request, mocked_osm_response):
mocked_request.return_value = mocked_osm_response
kwargs = dict(city='london', country='UK')
geocode(**kwargs)
assert mocked_request.call_args[1]['params'] == dict(format="json", **kwargs)
@mock.patch(REQUESTS)
def test_url_correct_with_query(self, mocked_request, mocked_osm_response):
mocked_request.return_value = mocked_osm_response
kwargs = dict(q='my place')
geocode(**kwargs)
assert mocked_request.call_args[1]['params'] == dict(format="json", **kwargs)
@mock.patch(REQUESTS)
def test_error_returned_if_no_match(self, mocked_request):
mocked_response = mock.Mock()
mocked_response.json.return_value = []
mocked_request.return_value = mocked_response
with pytest.raises(ValueError) as e:
geocode(q="Something bad")
assert "No geocode match" in str(e.value)
@mock.patch(REQUESTS)
def test_coordinates_extracted_from_json_with_one_result(self, mocked_request, mocked_osm_response):
mocked_request.return_value = mocked_osm_response
assert geocode(q='somewhere') == [{'lat': '12.923432', 'lon': '-75.234569'}]
@mock.patch(GEOCODE)
def test_geocode_wrapper_rejects_invalid_query_parameters(self, mocked_geocode):
with pytest.raises(ValueError) as e:
_geocode(cat='dog', city='Nice')
assert "Invalid query parameter" in str(e.value)
@mock.patch(GEOCODE)
def test_geocode_wrapper_rejects_both_q_and_kwargs_supplied(self, mocked_geocode):
with pytest.raises(ValueError) as e:
_geocode(city='London', q='somewhere')
assert "Supply either q OR other query parameters, they cannot be combined." in str(e.value)
@mock.patch(GEOCODE)
def test_geocode_wrapper_errors_if_no_query_parameters_supplied(self, mocked_geocode):
with pytest.raises(ValueError) as e:
_geocode()
assert "No query parameters supplied" in str(e.value)
@mock.patch(GEOCODE)
def test_geocode_wrapper_calls_geocode_properly(self, mocked_geocode):
mocked_geocode.return_value = [{'lat': 1.1, 'lon': 2.2}]
_geocode('my place')
_geocode(q='somewhere')
_geocode(city='London', country='UK')
_geocode(postalcode='ABC 123')
expected_calls = [mock.call(q='my place'),
mock.call(q='somewhere'),
mock.call(city='London', country='UK'),
mock.call(postalcode='ABC 123')
]
assert mocked_geocode.mock_calls == expected_calls
class TestGeocodeDataFrame():
@staticmethod
@pytest.fixture
def test_dataframe():
df = pd.DataFrame({'index': [0, 1, 2],
'city': ['London', 'Sheffield', 'Brussels'],
'country': ['UK', 'United Kingdom', 'Belgium'],
})
return df
@mock.patch(_GEOCODE)
def test_underlying_geocoding_function_called_with_city_country(self, mocked_geocode,
test_dataframe):
# Generate dataframe using a mocked output
mocked_geocode.side_effect = ['cat', 'dog', 'squirrel']
geocoded_dataframe = geocode_dataframe(test_dataframe)
# Expected outputs
expected_dataframe = pd.DataFrame({'index': [0, 1, 2],
'city': ['London', 'Sheffield', 'Brussels'],
'country': ['UK', 'United Kingdom', 'Belgium'],
'coordinates': ['cat', 'dog', 'squirrel']
})
expected_calls = [mock.call(city='London', country='UK'),
mock.call(city='Sheffield', country='United Kingdom'),
mock.call(city='Brussels', country='Belgium')]
# Check expected behaviours
assert geocoded_dataframe.to_dict(orient="records") == expected_dataframe.to_dict(orient="records")
assert mocked_geocode.mock_calls == expected_calls
@mock.patch(_GEOCODE)
def test_underlying_geocoding_function_called_with_query_fallback(self, mocked_geocode,
test_dataframe):
mocked_geocode.side_effect = [None, None, None, 'dog', 'cat', 'squirrel']
geocoded_dataframe = geocode_dataframe(test_dataframe)
# Expected outputs
expected_dataframe = pd.DataFrame({'index': [0, 1, 2],
'city': ['London', 'Sheffield', 'Brussels'],
'country': ['UK', 'United Kingdom', 'Belgium'],
'coordinates': ['dog', 'cat', 'squirrel']
})
expected_calls = [mock.call(city='London', country='UK'),
mock.call(city='Sheffield', country='United Kingdom'),
mock.call(city='Brussels', country='Belgium'),
mock.call('London UK'),
mock.call('Sheffield United Kingdom'),
mock.call('Brussels Belgium')]
# Check expected behaviours
assert geocoded_dataframe.to_dict(orient="records") == expected_dataframe.to_dict(orient="records")
assert mocked_geocode.mock_calls == expected_calls
@mock.patch(_GEOCODE)
def test_duplicates_are_only_geocoded_once(self, mocked_geocode):
test_dataframe = pd.DataFrame({'index': [0, 1, 2, 3],
'city': ['London', 'Brussels', 'London', 'Brussels'],
'country': ['UK', 'Belgium', 'UK', 'Belgium']
})
mocked_geocode.side_effect = ['LON', 'BRU']
geocoded_dataframe = geocode_dataframe(test_dataframe)
expected_dataframe = pd.DataFrame({'index': [0, 1, 2, 3],
'city': ['London', 'Brussels', 'London', 'Brussels'],
'country': ['UK', 'Belgium', 'UK', 'Belgium'],
'coordinates': ['LON', 'BRU', 'LON', 'BRU']
})
assert geocoded_dataframe.to_dict(orient="records") == expected_dataframe.to_dict(orient="records")
assert mocked_geocode.call_count == 2
class TestGeocodeBatchDataframe():
@staticmethod
@pytest.fixture
def test_dataframe():
df = pd.DataFrame({'index': [0, 1, 2],
'city': ['London', 'Sheffield', 'Brussels'],
'country': ['UK', 'United Kingdom', 'Belgium'],
})
return df
@mock.patch(_GEOCODE)
def test_underlying_geocoding_function_called_with_city_country(self, mocked_geocode,
test_dataframe):
# Generate dataframe using a mocked output
mocked_geocode.side_effect = [{'lat': '12.923432', 'lon': '-75.234569'},
{'lat': '99.999999', 'lon': '-88.888888'},
{'lat': '-2.202022', 'lon': '0.000000'}
]
# Expected outputs
expected_dataframe = pd.DataFrame({'index': [0, 1, 2],
'city': ['London', 'Sheffield', 'Brussels'],
'country': ['UK', 'United Kingdom', 'Belgium'],
'latitude': [12.923432, 99.999999, -2.202022],
'longitude': [-75.234569, -88.888888, 0.0]
})
expected_calls = [mock.call(city='London', country='UK'),
mock.call(city='Sheffield', country='United Kingdom'),
mock.call(city='Brussels', country='Belgium')]
geocoded_dataframe = geocode_batch_dataframe(test_dataframe)
# Check expected behaviours
assert_frame_equal(geocoded_dataframe, expected_dataframe,
check_like=True, check_dtype=False)
assert mocked_geocode.mock_calls == expected_calls
@mock.patch(_GEOCODE)
def test_underlying_geocoding_function_called_with_query_fallback(self,
mocked_geocode,
test_dataframe):
mocked_geocode.side_effect = [None,
{'lat': 1, 'lon': 4},
None,
{'lat': 2, 'lon': 5},
None,
{'lat': 3, 'lon': 6}
]
# Expected outputs
expected_dataframe = pd.DataFrame({'index': [0, 1, 2],
'city': ['London', 'Sheffield', 'Brussels'],
'country': ['UK', 'United Kingdom', 'Belgium'],
'latitude': [1.0, 2.0, 3.0],
'longitude': [4.0, 5.0, 6.0],
})
expected_calls = [mock.call(city='London', country='UK'),
mock.call(q='London UK'),
mock.call(city='Sheffield', country='United Kingdom'),
mock.call(q='Sheffield United Kingdom'),
mock.call(city='Brussels', country='Belgium'),
mock.call(q='Brussels Belgium')]
geocoded_dataframe = geocode_batch_dataframe(test_dataframe, query_method='both')
# Check expected behaviours
assert_frame_equal(geocoded_dataframe, expected_dataframe,
check_like=True, check_dtype=False)
assert mocked_geocode.mock_calls == expected_calls
@mock.patch(_GEOCODE)
def test_underlying_geocoding_function_called_with_query_method_only(self,
mocked_geocode,
test_dataframe):
mocked_geocode.side_effect = [{'lat': 1, 'lon': 4},
{'lat': 2, 'lon': 5},
{'lat': 3, 'lon': 6}
]
# Expected outputs
expected_dataframe = pd.DataFrame({'index': [0, 1, 2],
'city': ['London', 'Sheffield', 'Brussels'],
'country': ['UK', 'United Kingdom', 'Belgium'],
'latitude': [1.0, 2.0, 3.0],
'longitude': [4.0, 5.0, 6.0],
})
expected_calls = [mock.call(q='London UK'),
mock.call(q='Sheffield United Kingdom'),
mock.call(q='Brussels Belgium')]
geocoded_dataframe = geocode_batch_dataframe(test_dataframe, query_method='query_only')
# Check expected behaviours
assert_frame_equal(geocoded_dataframe, expected_dataframe,
check_like=True, check_dtype=False)
assert mocked_geocode.mock_calls == expected_calls
@mock.patch(_GEOCODE)
def test_valueerror_raised_when_invalid_query_method_passed(self,
mocked_geocode,
test_dataframe):
with pytest.raises(ValueError):
geocode_batch_dataframe(test_dataframe, query_method='cats')
with pytest.raises(ValueError):
geocode_batch_dataframe(test_dataframe, query_method='test')
with pytest.raises(ValueError):
geocode_batch_dataframe(test_dataframe, query_method=1)
@mock.patch(_GEOCODE)
def test_output_column_names_are_applied(self, mocked_geocode, test_dataframe):
# Generate dataframe using a mocked output
mocked_geocode.side_effect = [{'lat': '12.923432', 'lon': '-75.234569'},
{'lat': '99.999999', 'lon': '-88.888888'},
{'lat': '-2.202022', 'lon': '0.000000'}
]
# Expected outputs
expected_dataframe = pd.DataFrame({'index': [0, 1, 2],
'city': ['London', 'Sheffield', 'Brussels'],
'country': ['UK', 'United Kingdom', 'Belgium'],
'lat': [12.923432, 99.999999, -2.202022],
'lon': [-75.234569, -88.888888, 0.0]
})
geocoded_dataframe = geocode_batch_dataframe(test_dataframe,
latitude='lat',
longitude='lon')
# Check expected behaviours
assert_frame_equal(geocoded_dataframe, expected_dataframe,
check_like=True, check_dtype=False)
class TestCountryIsoCode():
@mock.patch(PYCOUNTRY)
def test_lookup_via_name(self, mocked_pycountry):
mocked_pycountry.return_value = 'country_object'
expected_calls = [mock.call(name='United Kingdom')]
assert country_iso_code('United Kingdom') == 'country_object'
assert mocked_pycountry.mock_calls == expected_calls
assert mocked_pycountry.call_count == 1
country_iso_code.cache_clear()
@mock.patch(PYCOUNTRY)
def test_lookup_via_common_name(self, mocked_pycountry):
mocked_pycountry.side_effect = [KeyError(), 'country_object']
expected_calls = [mock.call(name='United Kingdom'),
mock.call(common_name='United Kingdom')
]
assert country_iso_code('United Kingdom') == 'country_object'
assert mocked_pycountry.mock_calls == expected_calls
assert mocked_pycountry.call_count == 2
country_iso_code.cache_clear()
@mock.patch(PYCOUNTRY)
def test_lookup_via_official_name(self, mocked_pycountry):
mocked_pycountry.side_effect = [KeyError(), KeyError(), 'country_object']
expected_calls = [mock.call(name='United Kingdom'),
mock.call(common_name='United Kingdom'),
mock.call(official_name='United Kingdom')
]
assert country_iso_code('United Kingdom') == 'country_object'
assert mocked_pycountry.mock_calls == expected_calls
assert mocked_pycountry.call_count == 3
country_iso_code.cache_clear()
@mock.patch(PYCOUNTRY)
def test_invalid_lookup_raises_keyerror(self, mocked_pycountry):
mocked_pycountry.side_effect = [KeyError(), KeyError(), KeyError()]*2
with pytest.raises(KeyError) as e:
country_iso_code('Fake Country')
assert 'Fake Country not found' in str(e.value)
country_iso_code.cache_clear()
@mock.patch(PYCOUNTRY)
def test_title_case_is_applied(self, mocked_pycountry):
expected_calls = []
names = ['united kingdom', 'UNITED KINGDOM',
'United kingdom']
mocked_pycountry.side_effect = [KeyError(), KeyError(), KeyError(), 'blah'] * len(names)
for name in names:
country_iso_code(name) # Find the iso codes
raw_call = mock.call(name=name)
common_call = mock.call(common_name=name)
official_call = mock.call(official_name=name)
title_call = mock.call(name='United Kingdom')
expected_calls.append(raw_call) # The initial call
expected_calls.append(common_call) # Tries common name call
expected_calls.append(official_call) # Tries official name
expected_calls.append(title_call) # The title case call
assert mocked_pycountry.mock_calls == expected_calls
country_iso_code.cache_clear()
class TestCountryIsoCodeDataframe():
@staticmethod
def _mocked_response(alpha_2, alpha_3, numeric, continent):
'''Builds a mocked response for the patched country_iso_code function.'''
response = mock.Mock()
response.alpha_2 = alpha_2
response.alpha_3 = alpha_3
response.numeric = numeric
response.continent = continent
return response
@mock.patch(COUNTRY_ISO_CODE)
def test_valid_countries_coded(self, mocked_country_iso_code):
test_df = pd.DataFrame({'index': [0, 1, 2],
'country': ['United Kingdom', 'Belgium', 'United States']
})
mocked_response_uk = self._mocked_response('GB', 'GBR', '123', 'EU')
mocked_response_be = self._mocked_response('BE', 'BEL', '875', 'EU')
mocked_response_us = self._mocked_response('US', 'USA', '014', 'NA')
mocked_country_iso_code.side_effect = [mocked_response_uk,
mocked_response_be,
mocked_response_us
]
expected_dataframe = pd.DataFrame(
{'index': [0, 1, 2],
'country': ['United Kingdom', 'Belgium', 'United States'],
'country_alpha_2': ['GB', 'BE', 'US'],
'country_alpha_3': ['GBR', 'BEL', 'USA'],
'country_numeric': ['123', '875', '014'],
'continent': ['EU', 'EU', 'NA']
})
coded_df = country_iso_code_dataframe(test_df)
assert coded_df.to_dict(orient="records") == expected_dataframe.to_dict(orient="records")
@mock.patch(COUNTRY_ISO_CODE)
def test_invalid_countries_data_is_none(self, mocked_country_iso_code):
test_df = pd.DataFrame({'index': [0, 1, 2],
'country': ['United Kingdom', 'Belgium', 'United States']
})
mocked_country_iso_code.side_effect = KeyError
expected_dataframe = pd.DataFrame(
{'index': [0, 1, 2],
'country': ['United Kingdom', 'Belgium', 'United States'],
'country_alpha_2': [None, None, None],
'country_alpha_3': [None, None, None],
'country_numeric': [None, None, None],
'continent': [None, None, None]
})
coded_df = country_iso_code_dataframe(test_df)
assert coded_df.to_dict(orient="records") == expected_dataframe.to_dict(orient="records")
class TestCountryIsoCodeToName():
def test_valid_iso_code_returns_name(self):
assert country_iso_code_to_name('ITA') == 'Italy'
assert country_iso_code_to_name('DEU') == 'Germany'
assert country_iso_code_to_name('GBR') == 'United Kingdom'
def test_invalid_iso_code_returns_none(self):
assert country_iso_code_to_name('FOO') is None
assert country_iso_code_to_name('ABC') is None
assert country_iso_code_to_name('ZZZ') is None
def test_generate_composite_key():
assert generate_composite_key('London', 'United Kingdom') == 'london_united-kingdom'
assert generate_composite_key('Paris', 'France') == 'paris_france'
assert generate_composite_key('Name-with hyphen', 'COUNTRY') == 'name-with-hyphen_country'
def test_generate_composite_key_raises_error_with_invalid_input():
with pytest.raises(ValueError):
generate_composite_key(None, 'UK')
with pytest.raises(ValueError):
generate_composite_key('city_only')
with pytest.raises(ValueError):
generate_composite_key(1, 2)
def test_get_continent_lookup():
continents = get_continent_lookup()
assert None in continents
assert '' in continents
assert continents['NA'] == 'North America'
assert len(continents) == 9 # 2 nulls + 7 continents
def test_get_country_region_lookup():
countries = get_country_region_lookup()
assert len(countries) > 100
assert len(countries) < 1000
assert all(len(k) == 2 for k in countries.keys())
assert all(type(v) is tuple for v in countries.values())
assert all(len(v) == 2 for v in countries.values())
all_regions = {v[1] for v in countries.values()}
assert len(all_regions) == 18
def test_country_continent_lookup():
lookup = get_country_continent_lookup()
non_nulls = {k: v for k, v in lookup.items()
if k is not None and k != ''}
# All iso2, so length == 2
    assert all(len(k) == 2 for k in non_nulls)
assert all(len(v) == 2 for v in non_nulls.values())
# Either strings or Nones
country_types = set(type(v) for v in lookup.values())
assert country_types == {str, type(None)}
# Right ball-park of country and continent numbers
assert len(non_nulls) > 100 # num countries
assert len(non_nulls) < 1000 # num countries
assert len(set(non_nulls.values())) == 7 # num continents
| 48.195122 | 107 | 0.565494 | [
"MIT"
] | anniyanvr/nesta | nesta/packages/geo_utils/tests/test_geotools.py | 23,712 | Python |
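For context, a sketch of calling the batch geocoder these tests exercise; unlike the tests it would hit the live Nominatim service, so it is illustrative rather than something to run in CI.
import pandas as pd
from nesta.packages.geo_utils.geocode import geocode_batch_dataframe
df = pd.DataFrame({"city": ["London", "Brussels"],
                   "country": ["United Kingdom", "Belgium"]})
geocoded = geocode_batch_dataframe(df, latitude="lat", longitude="lon")
print(geocoded[["city", "lat", "lon"]])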
from django.db import transaction
from rest_framework.serializers import ModelSerializer
from galaxy_api.api import models
class NamespaceLinkSerializer(ModelSerializer):
class Meta:
model = models.NamespaceLink
fields = ('name', 'url')
class NamespaceSerializer(ModelSerializer):
links = NamespaceLinkSerializer(many=True)
class Meta:
model = models.Namespace
fields = ('name', 'company', 'email', 'avatar_url', 'description', 'links')
read_only_fields = ('name', )
def update(self, instance, validated_data):
links = validated_data.pop('links')
with transaction.atomic():
instance = super().update(instance, validated_data)
instance.update_links(links)
return instance
| 26.166667 | 83 | 0.677707 | [
"Apache-2.0"
] | newswangerd/galaxy-api | galaxy_api/api/v3/serializers/namespace.py | 785 | Python |
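A sketch of driving the nested update above from a view or shell; the namespace name and payload values are placeholders, and Namespace.update_links is assumed to exist as the serializer implies.
instance = models.Namespace.objects.get(name="example-namespace")  # placeholder name
payload = {
    "company": "Example Inc.",
    "email": "owner@example.org",
    "description": "Example namespace",
    "links": [{"name": "homepage", "url": "https://example.org"}],
}
serializer = NamespaceSerializer(instance, data=payload, partial=True)
serializer.is_valid(raise_exception=True)
serializer.save()   # runs the transactional update plus update_links()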
"""
This module is for testing the distributions. Tests should focus on ensuring we can
expand distributions without missing emails or getting too many or running into infinite
loops.
"""
from django.test import TestCase
from ..models import EmailAddress, Distribution
class DistributionTestCase(TestCase):
def setUp(self):
self.test1 = EmailAddress.objects.create(email_address="test1@example.org")
self.test2 = EmailAddress.objects.create(email_address="test2@example.org")
self.all_emails = set([self.test1, self.test2])
self.disti = Distribution.objects.create(name="Test Disti")
self.disti.email_addresses.add(self.test1, self.test2)
# build disti with duplicates
self.dupe_disti = Distribution.objects.create(name="Dupe Disti")
self.dupe_disti.email_addresses.add(self.test1, self.test2)
self.dupe_disti.distributions.add(self.disti)
# build disti with self reference
self.self_disti = Distribution.objects.create(name="Self Disti")
self.self_disti.email_addresses.add(self.test1)
self.self_disti.distributions.add(self.self_disti)
# build disti with cyclic reference
self.cyclic_disti1 = Distribution.objects.create(name="Cyclic Disti 1")
self.cyclic_disti1.email_addresses.add(self.test1)
self.cyclic_disti2 = Distribution.objects.create(name="Cyclic Disti 2")
self.cyclic_disti2.email_addresses.add(self.test2)
self.cyclic_disti1.distributions.add(self.cyclic_disti2)
self.cyclic_disti2.distributions.add(self.cyclic_disti1)
def test_constructor_properties(self):
self.assertEqual(self.disti.name, "Test Disti")
emails = self.disti.email_addresses.all()
self.assertIn(self.test1, emails)
self.assertIn(self.test2, emails)
def test_collect_distribution(self):
"""
Test that emails are collected properly.
"""
test_emails = self.disti.collect_email_addresses()
self.assertEqual(len(test_emails), 2)
self.assertSetEqual(self.all_emails, set(test_emails))
def test_collect_distribution_with_duplicates(self):
"""
        Test that a distribution with duplicates only collects each email
        once.
"""
test_emails = self.dupe_disti.collect_email_addresses()
self.assertEqual(len(test_emails), 2)
self.assertSetEqual(self.all_emails, set(test_emails))
def test_collect_distribution_with_self_references(self):
"""
        Test that a distribution with self references only collects each
        email once, without looping infinitely.
"""
test_emails = self.self_disti.collect_email_addresses()
self.assertEqual(len(test_emails), 1)
self.assertSetEqual(set([self.test1]), set(test_emails))
def test_collect_distribution_with_cyclic_references(self):
"""
Test that a distribution with cyclic references only collects each email once,
and without looping infinitely.
"""
test_emails = self.cyclic_disti1.collect_email_addresses()
self.assertEqual(len(test_emails), 2)
self.assertSetEqual(self.all_emails, set(test_emails))
test_emails = self.cyclic_disti2.collect_email_addresses()
self.assertEqual(len(test_emails), 2)
self.assertSetEqual(self.all_emails, set(test_emails))
| 42.036585 | 88 | 0.707572 | [
"MIT"
] | gregschmit/django-impression | impression/tests/test_distribution.py | 3,447 | Python |
from marshmallow import Schema, fields
from marshmallow.validate import OneOf
ticket_type = ("Bug", "Report", "Feature", "Request", "Other")
ticket_urgency = ("Low", "Mid", "High")
ticket_status = ("Open", "In Progress", "Completed", "Rejected")
class Ticket(Schema):
id = fields.Int(dump_only=True)
created_at = fields.DateTime(dump_only=True)
name = fields.Str(required=True)
email = fields.Email(required=True)
subject = fields.Str(required=True)
message = fields.Str(required=True)
type = fields.Str(required=True, validate=OneOf(ticket_type))
urgency = fields.Str(required=True, validate=OneOf(ticket_urgency))
status = fields.Str(
missing="Open", required=True, validate=OneOf(ticket_status)
)
class Comment(Schema):
id = fields.Int(dump_only=True)
message = fields.Str(required=True)
created_at = fields.DateTime(dump_only=True)
class User(Schema):
email = fields.Str(required=True)
password = fields.Str(required=True)
| 33.967742 | 71 | 0.705603 | [
"Apache-2.0"
] | barrachri/ticketbyrd | ticketbyrd/schema.py | 1,053 | Python |
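A sketch of validating inbound data with the Ticket schema above, assuming marshmallow 3 semantics: load() returns the deserialized dict, fills in the missing="Open" default for status, and raises ValidationError for missing fields or values outside the allowed choices.
from marshmallow import ValidationError
ticket_schema = Ticket()
try:
    ticket = ticket_schema.load({
        "name": "Ada",
        "email": "ada@example.org",
        "subject": "Crash on login",
        "message": "The app crashes when I log in.",
        "type": "Bug",
        "urgency": "High",
    })
    print(ticket["status"])   # "Open", supplied by the schema default
except ValidationError as err:
    print(err.messages)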
from conans import ConanFile, CMake
import os
class TinyreflTool(ConanFile):
name = 'tinyrefl-tool'
version = '0.4.1'
url = 'https://github.com/Manu343726/tinyrefl'
description = ' A work in progress minimal C++ static reflection API and codegen tool'
scm = {
'type': 'git',
'url': 'https://github.com/Manu343726/tinyrefl',
'revision': 'auto',
'subfolder': 'tinyrefl'
}
generators = 'cmake'
build_requires = ('jsonformoderncpp/3.5.0@vthiery/stable',
'fmt/5.2.1@bincrafters/stable',
'ctti/0.0.2@Manu343726/testing',
'cppast/master@Manu343726/testing',
'llvm_support/6.0.1@Manu343726/testing')
requires = 'clang_executables/6.0.1@Manu343726/testing'
default_options = 'fmt:header_only=True'
settings = 'os', 'compiler', 'build_type', 'arch'
def build(self):
cmake = CMake(self)
cmake.configure(
source_folder='tinyrefl',
defs = {
'TINYREFL_BUILD_TESTS': False,
'TINYREFL_BUILD_EXAMPLES': False
}
)
cmake.build(target='tinyrefl-tool')
def package(self):
self.copy('tinyrefl-tool*',
src='bin',
dst='bin')
self.copy('utils.cmake',
src=os.path.join(self.source_folder, 'tinyrefl', 'cmake'),
dst='cmake',
keep_path=False)
self.copy('driver.cmake',
src=os.path.join(self.source_folder, 'tinyrefl', 'tool'),
dst='cmake',
keep_path=False)
self.copy('tinyrefl_tool-config.cmake',
src=os.path.join(self.source_folder, 'tinyrefl', 'cmake'),
dst='cmake',
keep_path=False)
self.copy('tinyrefl_tool-version.cmake',
dst='cmake',
keep_path=False)
| 31.728814 | 90 | 0.563568 | [
"MIT"
] | Bjoe/tinyrefl | tool/conanfile.py | 1,872 | Python |
import bpy
from bpy import context
from . import node_functions
from . import material_functions
from . import constants
import mathutils
def update_selected_image(self, context):
sel_texture = bpy.data.images[self.texture_index]
show_image_in_image_editor(sel_texture)
def show_image_in_image_editor(image):
for area in bpy.context.screen.areas:
if area.type == 'IMAGE_EDITOR':
area.spaces.active.image = image
def switch_baked_material(show_bake_material,affect):
current_bake_type = bpy.context.scene.bake_settings.get_current_bake_type()
material_name_suffix = constants.Material_Suffix.bake_type_mat_suffix[current_bake_type]
    # decide which objects to operate on
if affect == 'active':
objects = [bpy.context.active_object]
elif affect == 'selected':
objects = bpy.context.selected_editable_objects
elif affect == 'visible':
objects = [ob for ob in bpy.context.view_layer.objects if ob.visible_get()]
elif affect == 'scene':
objects = bpy.context.scene.objects
all_mats = bpy.data.materials
baked_mats = [mat for mat in all_mats if material_name_suffix in mat.name]
for obj in objects:
if current_bake_type != "pbr":
baked_ao_flag = getattr(obj,"ao_map_name") != '' or getattr(obj,"lightmap_name") != ''
if not baked_ao_flag:
continue
for slot in obj.material_slots:
if show_bake_material:
for baked_mat in baked_mats:
if baked_mat.name == slot.material.name + material_name_suffix + obj.bake_version:
slot.material = baked_mat
else:
if (material_name_suffix in slot.material.name):
bake_material = slot.material
index = bake_material.name.find(material_name_suffix)
org_mat = all_mats.get(bake_material.name[0:index])
if org_mat is not None:
slot.material = org_mat
def preview_bake_texture(self,context):
context = bpy.context
bake_settings = context.scene.bake_settings
preview_bake_texture = context.scene.texture_settings.preview_bake_texture
vis_mats = material_functions.get_all_visible_materials()
for mat in vis_mats:
if not mat.node_tree:
continue
nodes = mat.node_tree.nodes
bake_texture_node = None
if bake_settings.lightmap_bake:
bake_texture_node = nodes.get(bake_settings.texture_node_lightmap)
elif bake_settings.ao_bake:
bake_texture_node = nodes.get(bake_settings.texture_node_ao)
if bake_texture_node is not None:
if preview_bake_texture:
node_functions.emission_setup(mat, bake_texture_node.outputs["Color"])
else:
pbr_node = node_functions.get_nodes_by_type(nodes, constants.Node_Types.pbr_node)
if len(pbr_node) == 0:
return
pbr_node = pbr_node[0]
node_functions.remove_node(mat, "Emission Bake")
node_functions.reconnect_PBR(mat, pbr_node)
def preview_lightmap(self, context):
preview_lightmap = context.scene.texture_settings.preview_lightmap
vis_mats = material_functions.get_all_visible_materials()
for material in vis_mats:
if not material.node_tree:
continue
nodes = material.node_tree.nodes
lightmap_node = nodes.get("Lightmap")
if lightmap_node is None:
continue
pbr_node = node_functions.get_pbr_node(material)
if pbr_node is None:
print("\n " + material.name + " has no PBR Node \n")
continue
base_color_input = node_functions.get_pbr_inputs(pbr_node)["base_color_input"]
emission_input = node_functions.get_pbr_inputs(pbr_node)["emission_input"]
lightmap_output = lightmap_node.outputs["Color"]
if preview_lightmap:
# add mix node
mix_node_name = "Mulitply Lightmap"
mix_node = node_functions.add_node(material,constants.Shader_Node_Types.mix, mix_node_name)
mix_node.blend_type = 'MULTIPLY'
mix_node.inputs[0].default_value = 1 # set factor to 1
pos_offset = mathutils.Vector((-200, 200))
mix_node.location = pbr_node.location + pos_offset
mix_node_input1 = mix_node.inputs["Color1"]
mix_node_input2 = mix_node.inputs["Color2"]
mix_node_output = mix_node.outputs["Color"]
# image texture in base color
if base_color_input.is_linked:
node_before_base_color = base_color_input.links[0].from_node
if not node_before_base_color.name == mix_node_name:
node_functions.make_link(material, node_before_base_color.outputs["Color"], mix_node_input1)
node_functions.make_link(material, lightmap_output, mix_node_input2)
node_functions.make_link(material, mix_node_output, base_color_input)
            else:
mix_node_input1.default_value = base_color_input.default_value
node_functions.make_link(material, lightmap_output, mix_node_input2)
node_functions.make_link(material, mix_node_output, base_color_input)
node_functions.remove_link(material,lightmap_output,emission_input)
if not preview_lightmap:
# remove mix and reconnect base color
mix_node = nodes.get("Mulitply Lightmap")
if mix_node is not None:
color_input_connections = len(mix_node.inputs["Color1"].links)
if (color_input_connections == 0):
node_functions.remove_node(material,mix_node.name)
else:
node_functions.remove_reconnect_node(material,mix_node.name)
node_functions.link_pbr_to_output(material,pbr_node)
def lightmap_to_emission(self, context, connect):
    """Connect or disconnect each visible material's lightmap output to/from the PBR emission input."""
    vis_mats = material_functions.get_all_visible_materials()
for material in vis_mats:
if not material.node_tree:
continue
nodes = material.node_tree.nodes
pbr_node = node_functions.get_pbr_node(material)
lightmap_node = nodes.get("Lightmap")
if lightmap_node is None:
continue
emission_input = node_functions.get_pbr_inputs(pbr_node)["emission_input"]
lightmap_output = lightmap_node.outputs["Color"]
if connect:
node_functions.make_link(material, lightmap_output, emission_input)
else:
node_functions.remove_link(material,lightmap_output,emission_input)
| 38.481283 | 116 | 0.618677 | [
"MIT"
] | LorenzWieseke/GLBTextureTools | Functions/visibility_functions.py | 7,196 | Python |
#!/usr/bin/env python
import sys
import re
def setup_python3():
# Taken from "distribute" setup.py
from distutils.filelist import FileList
from distutils import dir_util, file_util, util, log
from os.path import join
tmp_src = join("build", "src")
log.set_verbosity(1)
fl = FileList()
for line in open("MANIFEST.in"):
if not line.strip():
continue
fl.process_template_line(line)
dir_util.create_tree(tmp_src, fl.files)
outfiles_2to3 = []
for f in fl.files:
outf, copied = file_util.copy_file(f, join(tmp_src, f), update=1)
if copied and outf.endswith(".py"):
outfiles_2to3.append(outf)
util.run_2to3(outfiles_2to3)
# arrange setup to use the copy
sys.path.insert(0, tmp_src)
return tmp_src
kwargs = {}
if sys.version_info[0] >= 3:
from setuptools import setup
kwargs['use_2to3'] = True
kwargs['install_requires'] = ['html5lib', 'rdflib>3.0.0']
kwargs['src_root'] = setup_python3()
else:
try:
from setuptools import setup
kwargs['test_suite'] = "nose.collector"
kwargs['install_requires'] = ['html5lib', 'rdflib>3.0.0']
except ImportError:
from distutils.core import setup
# Find version. We have to do this because we can't import it in Python 3 until
# it has been automatically converted in the setup process.
def find_version(filename):
_version_re = re.compile(r'__version__ = "(.*)"')
for line in open(filename):
version_match = _version_re.match(line)
if version_match:
return version_match.group(1)
version = find_version('pyRdfa/__init__.py')
setup(
name = 'pyRdfa',
version = version,
description = "",
author = "",
author_email = "",
maintainer = "",
maintainer_email = "",
url = "",
license = "LICENSE",
platforms = ["any"],
classifiers = ["Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 2.4",
"Programming Language :: Python :: 2.5",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.2",
"License :: OSI Approved :: BSD License",
"Topic :: Software Development :: Libraries :: Python Modules",
"Operating System :: OS Independent",
"Natural Language :: English",
],
long_description = \
"""
""",
download_url = "%s.tar.gz" % version,
packages = ['pyRdfa',
'pyRdfa/host',
'pyRdfa/rdfs',
'pyRdfa/serializers',
'pyRdfa/transform',
],
**kwargs
)
| 30.8 | 82 | 0.570745 | [
"BSD-3-Clause"
] | DalavanCloud/PyRDFa | setup.py | 2,926 | Python |
import click
from typer.testing import CliRunner
import pytest
import os
from pathlib import Path
from ..main import install
from pytest_httpx import HTTPXMock
runner = CliRunner()
def get_test_resource(name: str) -> Path:
return Path(os.path.join(os.path.dirname(__file__), "testresources", name))
def test_install_invalid_archive(tmp_path):
data = b"data"
file_path = tmp_path / "test.tar"
with open(file_path, "wb") as f:
f.write(data)
with pytest.raises(click.exceptions.Exit):
install(
file_path,
["https://example.com"],
cache=False,
force=False,
start_on_boot=False,
)
assert os.listdir(tmp_path) == ["test.tar"]
def test_install(tmp_path, httpx_mock: HTTPXMock):
httpx_mock.add_response(
method="POST", json={"state": "success", "detail": "installed"}
)
time_skill = get_test_resource("time_example")
try:
install(
time_skill.as_posix(),
["https://example.com"],
cache=False,
force=False,
start_on_boot=False,
)
except click.exceptions.Exit as e:
assert e.exit_code == 0
| 25.659574 | 79 | 0.619403 | [
"MIT"
] | razzo04/rhasspy-skills-cli | rhasspy_skills_cli/tests/test_app.py | 1,206 | Python |
import datetime
import urllib
from django.conf import settings
from django.contrib.auth.models import User
from django.urls import reverse
from django.utils.translation import ugettext as _
from rest_flex_fields import FlexFieldsModelSerializer
from rest_flex_fields.serializers import FlexFieldsSerializerMixin
from rest_framework import serializers
from readthedocs.builds.models import Build, Version
from readthedocs.core.utils import slugify
from readthedocs.core.utils.extend import SettingsOverrideObject
from readthedocs.oauth.models import RemoteOrganization, RemoteRepository
from readthedocs.organizations.models import Organization, Team
from readthedocs.projects.constants import (
LANGUAGES,
PROGRAMMING_LANGUAGES,
REPO_CHOICES,
)
from readthedocs.projects.models import (
EnvironmentVariable,
Project,
ProjectRelationship,
)
from readthedocs.redirects.models import TYPE_CHOICES as REDIRECT_TYPE_CHOICES
from readthedocs.redirects.models import Redirect
class UserSerializer(FlexFieldsModelSerializer):
class Meta:
model = User
fields = [
'username',
]
class BaseLinksSerializer(serializers.Serializer):
def _absolute_url(self, path):
scheme = 'http' if settings.DEBUG else 'https'
domain = settings.PRODUCTION_DOMAIN
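        # Illustrative result (assumes DEBUG is off and PRODUCTION_DOMAIN is 'readthedocs.org'):
        #   _absolute_url('/api/v3/projects/pip/') -> 'https://readthedocs.org/api/v3/projects/pip/'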
return urllib.parse.urlunparse((scheme, domain, path, '', '', ''))
class BuildCreateSerializer(serializers.ModelSerializer):
"""
Used when triggering (create action) a ``Build`` for a specific ``Version``.
This serializer validates that no field is sent at all in the request.
"""
class Meta:
model = Build
fields = []
class BuildLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
version = serializers.SerializerMethodField()
project = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse(
'projects-builds-detail',
kwargs={
'parent_lookup_project__slug': obj.project.slug,
'build_pk': obj.pk,
},
)
return self._absolute_url(path)
def get_version(self, obj):
if obj.version:
path = reverse(
'projects-versions-detail',
kwargs={
'parent_lookup_project__slug': obj.project.slug,
'version_slug': obj.version.slug,
},
)
return self._absolute_url(path)
return None
def get_project(self, obj):
path = reverse(
'projects-detail',
kwargs={
'project_slug': obj.project.slug,
},
)
return self._absolute_url(path)
class BuildURLsSerializer(BaseLinksSerializer, serializers.Serializer):
build = serializers.URLField(source='get_full_url')
project = serializers.SerializerMethodField()
version = serializers.SerializerMethodField()
def get_project(self, obj):
path = reverse(
'projects_detail',
kwargs={
'project_slug': obj.project.slug
}
)
return self._absolute_url(path)
def get_version(self, obj):
if obj.version:
path = reverse(
'project_version_detail',
kwargs={
'project_slug': obj.project.slug,
'version_slug': obj.version.slug
}
)
return self._absolute_url(path)
return None
class BuildConfigSerializer(FlexFieldsSerializerMixin, serializers.Serializer):
"""
Render ``Build.config`` property without modifying it.
.. note::
Any change on the output of that property will be reflected here,
which may produce incompatible changes in the API.
"""
def to_representation(self, instance): # pylint: disable=arguments-differ
# For now, we want to return the ``config`` object as it is without
# manipulating it.
return instance
class BuildStateSerializer(serializers.Serializer):
code = serializers.CharField(source='state')
name = serializers.SerializerMethodField()
def get_name(self, obj):
return obj.state.title()
class BuildSerializer(FlexFieldsModelSerializer):
project = serializers.SlugRelatedField(slug_field='slug', read_only=True)
version = serializers.SlugRelatedField(slug_field='slug', read_only=True)
created = serializers.DateTimeField(source='date')
finished = serializers.SerializerMethodField()
success = serializers.SerializerMethodField()
duration = serializers.IntegerField(source='length')
state = BuildStateSerializer(source='*')
_links = BuildLinksSerializer(source='*')
urls = BuildURLsSerializer(source='*')
class Meta:
model = Build
fields = [
'id',
'version',
'project',
'created',
'finished',
'duration',
'state',
'success',
'error',
'commit',
'_links',
'urls',
]
expandable_fields = {
'config': (BuildConfigSerializer,)
}
def get_finished(self, obj):
if obj.date and obj.length:
return obj.date + datetime.timedelta(seconds=obj.length)
def get_success(self, obj):
"""
Return ``None`` if the build is not finished.
This is needed because ``default=True`` in the model field.
"""
if obj.finished:
return obj.success
return None
class VersionLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
builds = serializers.SerializerMethodField()
project = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse(
'projects-versions-detail',
kwargs={
'parent_lookup_project__slug': obj.project.slug,
'version_slug': obj.slug,
},
)
return self._absolute_url(path)
def get_builds(self, obj):
path = reverse(
'projects-versions-builds-list',
kwargs={
'parent_lookup_project__slug': obj.project.slug,
'parent_lookup_version__slug': obj.slug,
},
)
return self._absolute_url(path)
def get_project(self, obj):
path = reverse(
'projects-detail',
kwargs={
'project_slug': obj.project.slug,
},
)
return self._absolute_url(path)
class VersionDashboardURLsSerializer(BaseLinksSerializer, serializers.Serializer):
edit = serializers.SerializerMethodField()
def get_edit(self, obj):
path = reverse(
'project_version_detail',
kwargs={
'project_slug': obj.project.slug,
'version_slug': obj.slug,
})
return self._absolute_url(path)
class VersionURLsSerializer(BaseLinksSerializer, serializers.Serializer):
documentation = serializers.SerializerMethodField()
vcs = serializers.URLField(source='vcs_url')
dashboard = VersionDashboardURLsSerializer(source='*')
def get_documentation(self, obj):
return obj.project.get_docs_url(version_slug=obj.slug,)
class VersionSerializer(FlexFieldsModelSerializer):
ref = serializers.CharField()
downloads = serializers.SerializerMethodField()
urls = VersionURLsSerializer(source='*')
_links = VersionLinksSerializer(source='*')
class Meta:
model = Version
fields = [
'id',
'slug',
'verbose_name',
'identifier',
'ref',
'built',
'active',
'hidden',
'type',
'downloads',
'urls',
'_links',
]
expandable_fields = {
'last_build': (
BuildSerializer,
)
}
def get_downloads(self, obj):
downloads = obj.get_downloads()
data = {}
for k, v in downloads.items():
if k in ('html', 'pdf', 'epub'):
# Keep backward compatibility
if k == 'html':
k = 'htmlzip'
data[k] = ('http:' if settings.DEBUG else 'https:') + v
return data
class VersionUpdateSerializer(serializers.ModelSerializer):
"""
Used when modifying (update action) a ``Version``.
It only allows to make the Version active/non-active.
"""
class Meta:
model = Version
fields = [
'active',
'hidden',
]
class LanguageSerializer(serializers.Serializer):
code = serializers.SerializerMethodField()
name = serializers.SerializerMethodField()
def get_code(self, language):
return language
def get_name(self, language):
for code, name in LANGUAGES:
if code == language:
return name
return 'Unknown'
class ProgrammingLanguageSerializer(serializers.Serializer):
code = serializers.SerializerMethodField()
name = serializers.SerializerMethodField()
def get_code(self, programming_language):
return programming_language
def get_name(self, programming_language):
for code, name in PROGRAMMING_LANGUAGES:
if code == programming_language:
return name
return 'Unknown'
class ProjectURLsSerializer(BaseLinksSerializer, serializers.Serializer):
"""Serializer with all the user-facing URLs under Read the Docs."""
documentation = serializers.CharField(source='get_docs_url')
home = serializers.SerializerMethodField()
builds = serializers.SerializerMethodField()
versions = serializers.SerializerMethodField()
def get_home(self, obj):
path = reverse('projects_detail', kwargs={'project_slug': obj.slug})
return self._absolute_url(path)
def get_builds(self, obj):
path = reverse('builds_project_list', kwargs={'project_slug': obj.slug})
return self._absolute_url(path)
def get_versions(self, obj):
path = reverse('project_version_list', kwargs={'project_slug': obj.slug})
return self._absolute_url(path)
class RepositorySerializer(serializers.Serializer):
url = serializers.CharField(source='repo')
type = serializers.ChoiceField(
source='repo_type',
choices=REPO_CHOICES,
)
class ProjectLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
versions = serializers.SerializerMethodField()
builds = serializers.SerializerMethodField()
environmentvariables = serializers.SerializerMethodField()
redirects = serializers.SerializerMethodField()
subprojects = serializers.SerializerMethodField()
superproject = serializers.SerializerMethodField()
translations = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse('projects-detail', kwargs={'project_slug': obj.slug})
return self._absolute_url(path)
def get_versions(self, obj):
path = reverse(
'projects-versions-list',
kwargs={
'parent_lookup_project__slug': obj.slug,
},
)
return self._absolute_url(path)
def get_environmentvariables(self, obj):
path = reverse(
'projects-environmentvariables-list',
kwargs={
'parent_lookup_project__slug': obj.slug,
},
)
return self._absolute_url(path)
def get_redirects(self, obj):
path = reverse(
'projects-redirects-list',
kwargs={
'parent_lookup_project__slug': obj.slug,
},
)
return self._absolute_url(path)
def get_builds(self, obj):
path = reverse(
'projects-builds-list',
kwargs={
'parent_lookup_project__slug': obj.slug,
},
)
return self._absolute_url(path)
def get_subprojects(self, obj):
path = reverse(
'projects-subprojects-list',
kwargs={
'parent_lookup_parent__slug': obj.slug,
},
)
return self._absolute_url(path)
def get_superproject(self, obj):
path = reverse(
'projects-superproject',
kwargs={
'project_slug': obj.slug,
},
)
return self._absolute_url(path)
def get_translations(self, obj):
path = reverse(
'projects-translations-list',
kwargs={
'parent_lookup_main_language_project__slug': obj.slug,
},
)
return self._absolute_url(path)
class ProjectCreateSerializerBase(FlexFieldsModelSerializer):
"""Serializer used to Import a Project."""
repository = RepositorySerializer(source='*')
homepage = serializers.URLField(source='project_url', required=False)
class Meta:
model = Project
fields = (
'name',
'language',
'programming_language',
'repository',
'homepage',
)
def validate_name(self, value):
potential_slug = slugify(value)
if Project.objects.filter(slug=potential_slug).exists():
raise serializers.ValidationError(
_('Project with slug "{0}" already exists.').format(potential_slug),
)
return value
class ProjectCreateSerializer(SettingsOverrideObject):
_default_class = ProjectCreateSerializerBase
class ProjectUpdateSerializerBase(FlexFieldsModelSerializer):
"""Serializer used to modify a Project once imported."""
repository = RepositorySerializer(source='*')
homepage = serializers.URLField(
source='project_url',
required=False,
)
class Meta:
model = Project
fields = (
# Settings
'name',
'repository',
'language',
'programming_language',
'homepage',
# Advanced Settings -> General Settings
'default_version',
'default_branch',
'analytics_code',
'analytics_disabled',
'show_version_warning',
'single_version',
'external_builds_enabled',
# NOTE: we do not allow to change any setting that can be set via
# the YAML config file.
)
class ProjectUpdateSerializer(SettingsOverrideObject):
_default_class = ProjectUpdateSerializerBase
class ProjectSerializer(FlexFieldsModelSerializer):
"""
Project serializer.
.. note::
When using organizations, projects don't have the concept of users.
But we have organization.users.
"""
homepage = serializers.SerializerMethodField()
language = LanguageSerializer()
programming_language = ProgrammingLanguageSerializer()
repository = RepositorySerializer(source='*')
urls = ProjectURLsSerializer(source='*')
subproject_of = serializers.SerializerMethodField()
translation_of = serializers.SerializerMethodField()
default_branch = serializers.CharField(source='get_default_branch')
tags = serializers.StringRelatedField(many=True)
if not settings.RTD_ALLOW_ORGANIZATIONS:
users = UserSerializer(many=True)
_links = ProjectLinksSerializer(source='*')
# TODO: adapt these fields with the proper names in the db and then remove
# them from here
created = serializers.DateTimeField(source='pub_date')
modified = serializers.DateTimeField(source='modified_date')
class Meta:
model = Project
fields = [
'id',
'name',
'slug',
'created',
'modified',
'language',
'programming_language',
'homepage',
'repository',
'default_version',
'default_branch',
'subproject_of',
'translation_of',
'urls',
'tags',
# NOTE: ``expandable_fields`` must not be included here. Otherwise,
# they will be tried to be rendered and fail
# 'users',
# 'active_versions',
'_links',
]
if not settings.RTD_ALLOW_ORGANIZATIONS:
fields.append('users')
expandable_fields = {
# NOTE: this has to be a Model method, can't be a
# ``SerializerMethodField`` as far as I know
'active_versions': (
VersionSerializer,
{
'many': True,
}
)
}
if settings.RTD_ALLOW_ORGANIZATIONS:
expandable_fields.update({
'organization': (
'readthedocs.api.v3.serializers.OrganizationSerializer',
# NOTE: we cannot have a Project with multiple organizations.
{'source': 'organizations.first'},
),
'teams': (
serializers.SlugRelatedField,
{
'slug_field': 'slug',
'many': True,
'read_only': True,
},
),
})
def get_homepage(self, obj):
# Overridden only to return ``None`` when the project_url is ``''``
return obj.project_url or None
def get_translation_of(self, obj):
if obj.main_language_project:
return self.__class__(obj.main_language_project).data
def get_subproject_of(self, obj):
try:
return self.__class__(obj.superprojects.first().parent).data
except Exception:
return None
class SubprojectCreateSerializer(FlexFieldsModelSerializer):
"""Serializer used to define a Project as subproject of another Project."""
child = serializers.SlugRelatedField(
slug_field='slug',
queryset=Project.objects.none(),
)
class Meta:
model = ProjectRelationship
fields = [
'child',
'alias',
]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.parent_project = self.context['parent']
user = self.context['request'].user
self.fields['child'].queryset = (
self.parent_project.get_subproject_candidates(user)
)
# Give users a better error message.
self.fields['child'].error_messages['does_not_exist'] = _(
'Project with {slug_name}={value} is not valid as subproject'
)
def validate_alias(self, value):
# Check there is not a subproject with this alias already
subproject = self.parent_project.subprojects.filter(alias=value)
if subproject.exists():
raise serializers.ValidationError(
_('A subproject with this alias already exists'),
)
return value
# pylint: disable=arguments-differ
def validate(self, data):
self.parent_project.is_valid_as_superproject(
serializers.ValidationError
)
return data
class SubprojectLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
parent = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse(
'projects-subprojects-detail',
kwargs={
'parent_lookup_parent__slug': obj.parent.slug,
'alias_slug': obj.alias,
},
)
return self._absolute_url(path)
def get_parent(self, obj):
path = reverse(
'projects-detail',
kwargs={
'project_slug': obj.parent.slug,
},
)
return self._absolute_url(path)
class ChildProjectSerializer(ProjectSerializer):
"""
Serializer to render a Project when listed under ProjectRelationship.
It's exactly the same as ``ProjectSerializer`` but without some fields.
"""
class Meta(ProjectSerializer.Meta):
fields = [
field for field in ProjectSerializer.Meta.fields
if field not in ['subproject_of']
]
class SubprojectSerializer(FlexFieldsModelSerializer):
"""Serializer to render a subproject (``ProjectRelationship``)."""
child = ChildProjectSerializer()
_links = SubprojectLinksSerializer(source='*')
class Meta:
model = ProjectRelationship
fields = [
'child',
'alias',
'_links',
]
class SubprojectDestroySerializer(FlexFieldsModelSerializer):
"""Serializer used to remove a subproject relationship to a Project."""
class Meta:
model = ProjectRelationship
fields = (
'alias',
)
class RedirectLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
project = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse(
'projects-redirects-detail',
kwargs={
'parent_lookup_project__slug': obj.project.slug,
'redirect_pk': obj.pk,
},
)
return self._absolute_url(path)
def get_project(self, obj):
path = reverse(
'projects-detail',
kwargs={
'project_slug': obj.project.slug,
},
)
return self._absolute_url(path)
class RedirectSerializerBase(serializers.ModelSerializer):
project = serializers.SlugRelatedField(slug_field='slug', read_only=True)
created = serializers.DateTimeField(source='create_dt', read_only=True)
modified = serializers.DateTimeField(source='update_dt', read_only=True)
_links = RedirectLinksSerializer(source='*', read_only=True)
type = serializers.ChoiceField(source='redirect_type', choices=REDIRECT_TYPE_CHOICES)
class Meta:
model = Redirect
fields = [
'pk',
'created',
'modified',
'project',
'type',
'from_url',
'to_url',
'_links',
]
class RedirectCreateSerializer(RedirectSerializerBase):
pass
class RedirectDetailSerializer(RedirectSerializerBase):
"""Override RedirectSerializerBase to sanitize the empty fields."""
from_url = serializers.SerializerMethodField()
to_url = serializers.SerializerMethodField()
def get_from_url(self, obj):
        # Overridden only to return ``None`` when ``from_url`` is ``''``
return obj.from_url or None
def get_to_url(self, obj):
        # Overridden only to return ``None`` when ``to_url`` is ``''``
return obj.to_url or None
class EnvironmentVariableLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
project = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse(
'projects-environmentvariables-detail',
kwargs={
'parent_lookup_project__slug': obj.project.slug,
'environmentvariable_pk': obj.pk,
},
)
return self._absolute_url(path)
def get_project(self, obj):
path = reverse(
'projects-detail',
kwargs={
'project_slug': obj.project.slug,
},
)
return self._absolute_url(path)
class EnvironmentVariableSerializer(serializers.ModelSerializer):
value = serializers.CharField(write_only=True)
project = serializers.SlugRelatedField(slug_field='slug', read_only=True)
_links = EnvironmentVariableLinksSerializer(source='*', read_only=True)
class Meta:
model = EnvironmentVariable
fields = [
'pk',
'created',
'modified',
'name',
'value',
'public',
'project',
'_links',
]
class OrganizationLinksSerializer(BaseLinksSerializer):
_self = serializers.SerializerMethodField()
projects = serializers.SerializerMethodField()
def get__self(self, obj):
path = reverse(
'organizations-detail',
kwargs={
'organization_slug': obj.slug,
})
return self._absolute_url(path)
def get_projects(self, obj):
path = reverse(
'organizations-projects-list',
kwargs={
'parent_lookup_organizations__slug': obj.slug,
},
)
return self._absolute_url(path)
class TeamSerializer(FlexFieldsModelSerializer):
# TODO: add ``projects`` as flex field when we have a
# /organizations/<slug>/teams/<slug>/projects endpoint
created = serializers.DateTimeField(source='pub_date')
modified = serializers.DateTimeField(source='modified_date')
class Meta:
model = Team
fields = (
'name',
'slug',
'created',
'modified',
'access',
)
expandable_fields = {
'members': (UserSerializer, {'many': True}),
}
class OrganizationSerializer(FlexFieldsModelSerializer):
created = serializers.DateTimeField(source='pub_date')
modified = serializers.DateTimeField(source='modified_date')
owners = UserSerializer(many=True)
_links = OrganizationLinksSerializer(source='*')
class Meta:
model = Organization
fields = (
'name',
'description',
'url',
'slug',
'email',
'owners',
'created',
'modified',
'disabled',
'_links',
)
expandable_fields = {
'projects': (ProjectSerializer, {'many': True}),
'teams': (TeamSerializer, {'many': True}),
}
class RemoteOrganizationSerializer(serializers.ModelSerializer):
class Meta:
model = RemoteOrganization
fields = [
'pk',
'slug',
'name',
'avatar_url',
'url',
'vcs_provider',
'created',
'modified',
]
read_only_fields = fields
class RemoteRepositorySerializer(FlexFieldsModelSerializer):
admin = serializers.SerializerMethodField('is_admin')
class Meta:
model = RemoteRepository
fields = [
'pk',
'name',
'full_name',
'description',
'admin',
'avatar_url',
'ssh_url',
'clone_url',
'html_url',
'vcs',
'vcs_provider',
'private',
'default_branch',
'created',
'modified',
]
read_only_fields = fields
expandable_fields = {
'remote_organization': (
RemoteOrganizationSerializer, {'source': 'organization'}
),
'projects': (
ProjectSerializer, {'many': True}
)
}
def is_admin(self, obj):
request = self.context['request']
# Use annotated value from RemoteRepositoryViewSet queryset
if hasattr(obj, '_admin'):
return obj._admin
return obj.remote_repository_relations.filter(
user=request.user, admin=True
).exists()
| 28.180711 | 89 | 0.595468 | [
"MIT"
] | Dithn/readthedocs.org | readthedocs/api/v3/serializers.py | 27,758 | Python |
# Copyright 2021 Edoardo Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Complexity: O(n log m) for a matrix of n rows with m sorted elements each
# (equivalently O(n log n) when the matrix is square).
def search_in_sorted_matrix(A, x):
for S in A:
if binary_search(S, x):
return True
return False
def binary_search(A, x):
low = 0
high = len(A) - 1
mid = 0
while low <= high:
mid = (high + low) // 2
if A[mid] < x:
low = mid + 1
elif A[mid] > x:
high = mid - 1
else:
return True
return False
mat = [[1, 2, 3, 4, 5], [9, 10, 20, 32, 55]]
print(search_in_sorted_matrix(mat, 56))
| 24.533333 | 74 | 0.629529 | [
"Apache-2.0"
] | edoriggio/algorithms-and-data-structures | exercises/search_in_sorted_matrix.py | 1,104 | Python |
"""Bokeh ELPDPlot."""
import warnings
import bokeh.plotting as bkp
from bokeh.models.annotations import Title
from bokeh.models import ColumnDataSource
import bokeh.models.markers as mk
import numpy as np
from . import backend_kwarg_defaults
from .. import show_layout
from ...plot_utils import _scale_fig_size
from ....rcparams import rcParams, _validate_bokeh_marker
def plot_elpd(
ax,
models,
pointwise_data,
numvars,
figsize,
textsize,
plot_kwargs,
markersize,
xlabels,
coord_labels,
xdata,
threshold,
backend_kwargs,
show,
):
"""Bokeh elpd plot."""
if backend_kwargs is None:
backend_kwargs = {}
backend_kwargs = {
**backend_kwarg_defaults(("dpi", "plot.bokeh.figure.dpi"),),
**backend_kwargs,
}
dpi = backend_kwargs.pop("dpi")
if numvars == 2:
(figsize, _, _, _, _, markersize) = _scale_fig_size(
figsize, textsize, numvars - 1, numvars - 1
)
plot_kwargs.setdefault("s", markersize)
if ax is None:
backend_kwargs.setdefault("width", int(figsize[0] * dpi))
backend_kwargs.setdefault("height", int(figsize[1] * dpi))
ax = bkp.figure(**backend_kwargs)
ydata = pointwise_data[0] - pointwise_data[1]
_plot_atomic_elpd(
ax, xdata, ydata, *models, threshold, coord_labels, xlabels, True, True, plot_kwargs
)
show_layout(ax, show)
else:
max_plots = (
numvars ** 2 if rcParams["plot.max_subplots"] is None else rcParams["plot.max_subplots"]
)
vars_to_plot = np.sum(np.arange(numvars).cumsum() < max_plots)
if vars_to_plot < numvars:
warnings.warn(
"rcParams['plot.max_subplots'] ({max_plots}) is smaller than the number "
"of resulting ELPD pairwise plots with these variables, generating only a "
"{side}x{side} grid".format(max_plots=max_plots, side=vars_to_plot),
UserWarning,
)
numvars = vars_to_plot
(figsize, _, _, _, _, markersize) = _scale_fig_size(
figsize, textsize, numvars - 2, numvars - 2
)
plot_kwargs.setdefault("s", markersize)
if ax is None:
ax = []
for row in range(numvars - 1):
ax_row = []
for col in range(numvars - 1):
if row == 0 and col == 0:
ax_first = bkp.figure(
width=int(figsize[0] / (numvars - 1) * dpi),
height=int(figsize[1] / (numvars - 1) * dpi),
**backend_kwargs
)
ax_row.append(ax_first)
elif row < col:
ax_row.append(None)
else:
ax_row.append(
bkp.figure(
width=int(figsize[0] / (numvars - 1) * dpi),
height=int(figsize[1] / (numvars - 1) * dpi),
x_range=ax_first.x_range,
y_range=ax_first.y_range,
**backend_kwargs
)
)
ax.append(ax_row)
ax = np.array(ax)
for i in range(0, numvars - 1):
var1 = pointwise_data[i]
for j in range(0, numvars - 1):
if j < i:
continue
var2 = pointwise_data[j + 1]
ydata = var1 - var2
_plot_atomic_elpd(
ax[j, i],
xdata,
ydata,
models[i],
models[j + 1],
threshold,
coord_labels,
xlabels,
j == numvars - 2,
i == 0,
plot_kwargs,
)
show_layout(ax, show)
return ax
def _plot_atomic_elpd(
ax_,
xdata,
ydata,
model1,
model2,
threshold,
coord_labels,
xlabels,
xlabels_shown,
ylabels_shown,
plot_kwargs,
):
marker = _validate_bokeh_marker(plot_kwargs.get("marker"))
marker_func = getattr(mk, marker)
sizes = np.ones(len(xdata)) * plot_kwargs.get("s")
glyph = marker_func(
x="xdata", y="ydata", size="sizes", line_color=plot_kwargs.get("color", "black")
)
source = ColumnDataSource(dict(xdata=xdata, ydata=ydata, sizes=sizes))
ax_.add_glyph(source, glyph)
if threshold is not None:
diff_abs = np.abs(ydata - ydata.mean())
bool_ary = diff_abs > threshold * ydata.std()
if coord_labels is None:
coord_labels = xdata.astype(str)
outliers = np.argwhere(bool_ary).squeeze()
for outlier in outliers:
label = coord_labels[outlier]
ax_.text(
x=np.asarray(outlier), y=np.asarray(ydata[outlier]), text=label, text_color="black",
)
if ylabels_shown:
ax_.yaxis.axis_label = "ELPD difference"
else:
ax_.yaxis.minor_tick_line_color = None
ax_.yaxis.major_label_text_font_size = "0pt"
if xlabels_shown:
if xlabels:
ax_.xaxis.ticker = np.arange(0, len(coord_labels))
ax_.xaxis.major_label_overrides = {
str(key): str(value)
for key, value in zip(np.arange(0, len(coord_labels)), list(coord_labels))
}
else:
ax_.xaxis.minor_tick_line_color = None
ax_.xaxis.major_label_text_font_size = "0pt"
title = Title()
title.text = "{} - {}".format(model1, model2)
ax_.title = title
| 31.857923 | 100 | 0.521269 | [
"Apache-2.0"
] | Brahanyaa98/arviz | arviz/plots/backends/bokeh/elpdplot.py | 5,830 | Python |
# ============================================================================
# FILE: default.py
# AUTHOR: Shougo Matsushita <Shougo.Matsu at gmail.com>
# License: MIT license
# ============================================================================
import re
import typing
from denite.util import echo, error, clearmatch, regex_convert_py_vim
from denite.util import Nvim, UserContext, Candidates, Candidate
from denite.parent import SyncParent
class Default(object):
@property
def is_async(self) -> bool:
return self._is_async
def __init__(self, vim: Nvim) -> None:
self._vim = vim
self._denite: typing.Optional[SyncParent] = None
self._selected_candidates: typing.List[int] = []
self._candidates: Candidates = []
self._cursor = 0
self._entire_len = 0
self._result: typing.List[typing.Any] = []
self._context: UserContext = {}
self._bufnr = -1
self._winid = -1
self._winrestcmd = ''
self._initialized = False
self._winheight = 0
self._winwidth = 0
self._winminheight = -1
self._is_multi = False
self._is_async = False
self._matched_pattern = ''
self._displayed_texts: typing.List[str] = []
self._statusline_sources = ''
self._titlestring = ''
self._ruler = False
self._prev_action = ''
self._prev_status: typing.Dict[str, typing.Any] = {}
self._prev_curpos: typing.List[typing.Any] = []
self._save_window_options: typing.Dict[str, typing.Any] = {}
self._sources_history: typing.List[typing.Any] = []
self._previous_text = ''
self._floating = False
self._filter_floating = False
self._updated = False
self._timers: typing.Dict[str, int] = {}
self._matched_range_id = -1
self._matched_char_id = -1
self._check_matchdelete = bool(self._vim.call(
'denite#util#check_matchdelete'))
def start(self, sources: typing.List[typing.Any],
context: UserContext) -> typing.List[typing.Any]:
if not self._denite:
# if hasattr(self._vim, 'run_coroutine'):
# self._denite = ASyncParent(self._vim)
# else:
self._denite = SyncParent(self._vim)
self._result = []
context['sources_queue'] = [sources]
self._start_sources_queue(context)
return self._result
def do_action(self, action_name: str,
command: str = '', is_manual: bool = False) -> None:
if is_manual:
candidates = self._get_selected_candidates()
elif self._get_cursor_candidate():
candidates = [self._get_cursor_candidate()]
else:
candidates = []
if not self._denite or not candidates or not action_name:
return
self._prev_action = action_name
action = self._denite.get_action(
self._context, action_name, candidates)
if not action:
return
post_action = self._context['post_action']
is_quit = action['is_quit'] or post_action == 'quit'
if is_quit:
self.quit()
self._denite.do_action(self._context, action_name, candidates)
self._result = candidates
if command != '':
self._vim.command(command)
if is_quit and post_action == 'open':
# Re-open denite buffer
prev_cursor = self._cursor
cursor_candidate = self._get_cursor_candidate()
self._init_buffer()
self.redraw(False)
if cursor_candidate == self._get_candidate(prev_cursor):
# Restore the cursor
self._move_to_pos(prev_cursor)
# Disable quit flag
is_quit = False
if not is_quit and is_manual:
self._selected_candidates = []
self.redraw(action['is_redraw'])
if is_manual and self._context['sources_queue']:
self._context['input'] = ''
self._context['quick_move'] = ''
self._start_sources_queue(self._context)
return
def redraw(self, is_force: bool = True) -> None:
self._context['is_redraw'] = is_force
if is_force:
self._gather_candidates()
if self._update_candidates():
self._update_buffer()
else:
self._update_status()
self._context['is_redraw'] = False
def quit(self) -> None:
if self._denite:
self._denite.on_close(self._context)
self._quit_buffer()
self._result = []
return
def _restart(self) -> None:
self._context['input'] = ''
self._quit_buffer()
self._init_denite()
self._gather_candidates()
self._init_buffer()
self._update_candidates()
self._update_buffer()
def _start_sources_queue(self, context: UserContext) -> None:
if not context['sources_queue']:
return
self._sources_history.append({
'sources': context['sources_queue'][0],
'path': context['path'],
})
self._start(context['sources_queue'][0], context)
if context['sources_queue']:
context['sources_queue'].pop(0)
context['path'] = self._context['path']
def _start(self, sources: typing.List[typing.Any],
context: UserContext) -> None:
from denite.ui.map import do_map
self._vim.command('silent! autocmd! denite')
if re.search(r'\[Command Line\]$', self._vim.current.buffer.name):
# Ignore command line window.
return
resume = self._initialized and context['resume']
if resume:
# Skip the initialization
update = ('immediately', 'immediately_1',
'cursor_pos', 'prev_winid',
'start_filter', 'quick_move')
for key in update:
self._context[key] = context[key]
self._check_move_option()
if self._check_do_option():
return
self._init_buffer()
if context['refresh']:
self.redraw()
self._move_to_pos(self._cursor)
else:
if self._context != context:
self._context.clear()
self._context.update(context)
self._context['sources'] = sources
self._context['is_redraw'] = False
self._is_multi = len(sources) > 1
if not sources:
# Ignore empty sources.
error(self._vim, 'Empty sources')
return
self._init_denite()
self._gather_candidates()
self._update_candidates()
self._init_cursor()
self._check_move_option()
if self._check_do_option():
return
self._init_buffer()
self._update_displayed_texts()
self._update_buffer()
self._move_to_pos(self._cursor)
if self._context['quick_move'] and do_map(self, 'quick_move', []):
return
if self._context['start_filter']:
do_map(self, 'open_filter_buffer', [])
def _init_buffer(self) -> None:
self._prev_status = dict()
self._displayed_texts = []
self._prev_bufnr = self._vim.current.buffer.number
self._prev_curpos = self._vim.call('getcurpos')
self._prev_wininfo = self._get_wininfo()
self._prev_winid = int(self._context['prev_winid'])
self._winrestcmd = self._vim.call('winrestcmd')
self._ruler = self._vim.options['ruler']
self._switch_buffer()
self._bufnr = self._vim.current.buffer.number
self._winid = self._vim.call('win_getid')
self._resize_buffer(True)
self._winheight = self._vim.current.window.height
self._winwidth = self._vim.current.window.width
self._bufvars = self._vim.current.buffer.vars
self._bufvars['denite'] = {
'buffer_name': self._context['buffer_name'],
}
self._bufvars['denite_statusline'] = {}
self._vim.vars['denite#_previewed_buffers'] = {}
self._save_window_options = {}
window_options = {
'colorcolumn',
'concealcursor',
'conceallevel',
'cursorcolumn',
'cursorline',
'foldcolumn',
'foldenable',
'list',
'number',
'relativenumber',
'signcolumn',
'spell',
'winfixheight',
'wrap',
}
for k in window_options:
self._save_window_options[k] = self._vim.current.window.options[k]
# Note: Have to use setlocal instead of "current.window.options"
# "current.window.options" changes global value instead of local in
# neovim.
self._vim.command('setlocal colorcolumn=')
self._vim.command('setlocal conceallevel=3')
self._vim.command('setlocal concealcursor=inv')
self._vim.command('setlocal nocursorcolumn')
self._vim.command('setlocal nofoldenable')
self._vim.command('setlocal foldcolumn=0')
self._vim.command('setlocal nolist')
self._vim.command('setlocal nonumber')
self._vim.command('setlocal norelativenumber')
self._vim.command('setlocal nospell')
self._vim.command('setlocal winfixheight')
self._vim.command('setlocal nowrap')
self._vim.command('setlocal signcolumn=no')
if self._context['cursorline']:
self._vim.command('setlocal cursorline')
options = self._vim.current.buffer.options
if self._floating:
# Disable ruler
self._vim.options['ruler'] = False
options['buftype'] = 'nofile'
options['bufhidden'] = 'delete'
options['swapfile'] = False
options['buflisted'] = False
options['modeline'] = False
options['modifiable'] = False
options['filetype'] = 'denite'
if self._vim.call('exists', '#WinEnter'):
self._vim.command('doautocmd WinEnter')
if self._vim.call('exists', '#BufWinEnter'):
self._vim.command('doautocmd BufWinEnter')
if not self._vim.call('has', 'nvim'):
            # In Vim8, the FileType autocmd is not fired after setting the filetype option.
self._vim.command('silent doautocmd FileType denite')
if self._context['auto_action']:
self._vim.command('autocmd denite '
'CursorMoved <buffer> '
'call denite#call_map("auto_action")')
self._init_syntax()
def _switch_buffer(self) -> None:
split = self._context['split']
if (split != 'no' and self._winid > 0 and
self._vim.call('win_gotoid', self._winid)):
if split != 'vertical' and not self._floating:
# Move the window to bottom
self._vim.command('wincmd J')
self._winrestcmd = ''
return
self._floating = split in ['floating', 'floating_relative']
self._filter_floating = False
command = 'edit'
if split == 'tab':
self._vim.command('tabnew')
elif self._floating:
# Use floating window
if self._vim.current.buffer.options['filetype'] != 'denite':
self._titlestring = self._vim.options['titlestring']
if split == 'floating':
self._vim.call(
'nvim_open_win',
self._vim.call('bufnr', '%'), True, {
'relative': 'editor',
'row': int(self._context['winrow']),
'col': int(self._context['wincol']),
'width': int(self._context['winwidth']),
'height': int(self._context['winheight']),
})
elif split == 'floating_relative':
opened_pos = (self._vim.call('nvim_win_get_position', 0)[0] +
self._vim.call('winline') - 1)
if self._context['auto_resize']:
height = max(self._winheight, 1)
width = max(self._winwidth, 1)
else:
width = int(self._context['winwidth'])
height = int(self._context['winheight'])
if opened_pos + height + 3 > self._vim.eval('&lines'):
anchor = 'SW'
row = 0
self._context['filter_winrow'] = row + opened_pos
else:
anchor = 'NW'
row = 1
self._context['filter_winrow'] = row + height + opened_pos
self._vim.call(
'nvim_open_win',
self._vim.call('bufnr', '%'), True, {
'relative': 'cursor',
'row': row,
'col': 0,
'width': width,
'height': height,
'anchor': anchor,
})
elif self._context['filter_split_direction'] == 'floating':
self._titlestring = self._vim.options['titlestring']
self._filter_floating = True
elif split != 'no':
command = self._get_direction()
command += ' vsplit' if split == 'vertical' else ' split'
bufname = '[denite]-' + self._context['buffer_name']
if self._vim.call('exists', '*bufadd'):
bufnr = self._vim.call('bufadd', bufname)
vertical = 'vertical' if split == 'vertical' else ''
command = (
'buffer' if split
in ['no', 'tab', 'floating', 'floating_relative'] else 'sbuffer')
self._vim.command(
'silent keepalt %s %s %s %s' % (
self._get_direction(),
vertical,
command,
bufnr,
)
)
else:
self._vim.call(
'denite#util#execute_path',
f'silent keepalt {command}', bufname)
def _get_direction(self) -> str:
direction = str(self._context['direction'])
if direction == 'dynamictop' or direction == 'dynamicbottom':
self._update_displayed_texts()
winwidth = self._vim.call('winwidth', 0)
is_fit = not [x for x in self._displayed_texts
if self._vim.call('strwidth', x) > winwidth]
if direction == 'dynamictop':
direction = 'aboveleft' if is_fit else 'topleft'
else:
direction = 'belowright' if is_fit else 'botright'
return direction
def _get_wininfo(self) -> typing.List[typing.Any]:
return [
self._vim.options['columns'], self._vim.options['lines'],
self._vim.call('win_getid'), self._vim.call('tabpagebuflist')
]
def _switch_prev_buffer(self) -> None:
if (self._prev_bufnr == self._bufnr or
self._vim.buffers[self._prev_bufnr].name == ''):
self._vim.command('enew')
else:
self._vim.command('buffer ' + str(self._prev_bufnr))
def _init_syntax(self) -> None:
self._vim.command('syntax case ignore')
self._vim.command('highlight default link deniteInput ModeMsg')
self._vim.command('highlight link deniteMatchedRange ' +
self._context['highlight_matched_range'])
self._vim.command('highlight link deniteMatchedChar ' +
self._context['highlight_matched_char'])
self._vim.command('highlight default link ' +
'deniteStatusLinePath Comment')
self._vim.command('highlight default link ' +
'deniteStatusLineNumber LineNR')
self._vim.command('highlight default link ' +
'deniteSelectedLine Statement')
if self._floating:
self._vim.current.window.options['winhighlight'] = (
'Normal:' + self._context['highlight_window_background']
)
self._vim.command(('syntax match deniteSelectedLine /^[%s].*/' +
' contains=deniteConcealedMark') % (
self._context['selected_icon']))
self._vim.command(('syntax match deniteConcealedMark /^[ %s]/' +
' conceal contained') % (
self._context['selected_icon']))
if self._denite:
self._denite.init_syntax(self._context, self._is_multi)
def _update_candidates(self) -> bool:
if not self._denite:
return False
[self._is_async, pattern, statuses, self._entire_len,
self._candidates] = self._denite.filter_candidates(self._context)
prev_displayed_texts = self._displayed_texts
self._update_displayed_texts()
prev_matched_pattern = self._matched_pattern
self._matched_pattern = pattern
prev_statusline_sources = self._statusline_sources
self._statusline_sources = ' '.join(statuses)
if self._is_async:
self._start_timer('update_candidates')
else:
self._stop_timer('update_candidates')
updated = (self._displayed_texts != prev_displayed_texts or
self._matched_pattern != prev_matched_pattern or
self._statusline_sources != prev_statusline_sources)
if updated:
self._updated = True
self._start_timer('update_buffer')
if self._context['search'] and self._context['input']:
self._vim.call('setreg', '/', self._context['input'])
return self._updated
def _update_displayed_texts(self) -> None:
candidates_len = len(self._candidates)
if not self._is_async and self._context['auto_resize']:
winminheight = int(self._context['winminheight'])
max_height = min(int(self._context['winheight']),
self._get_max_height())
if (winminheight != -1 and candidates_len < winminheight):
self._winheight = winminheight
elif candidates_len > max_height:
self._winheight = max_height
elif candidates_len != self._winheight:
self._winheight = candidates_len
max_source_name_len = 0
if self._candidates:
max_source_name_len = max([
len(self._get_display_source_name(x['source_name']))
for x in self._candidates])
self._context['max_source_name_len'] = max_source_name_len
self._context['max_source_name_format'] = (
'{:<' + str(self._context['max_source_name_len']) + '}')
self._displayed_texts = [
self._get_candidate_display_text(i)
for i in range(0, candidates_len)
]
def _update_buffer(self) -> None:
is_current_buffer = self._bufnr == self._vim.current.buffer.number
self._update_status()
if self._check_matchdelete and self._context['match_highlight']:
matches = [x['id'] for x in
self._vim.call('getmatches', self._winid)]
if self._matched_range_id in matches:
self._vim.call('matchdelete',
self._matched_range_id, self._winid)
self._matched_range_id = -1
if self._matched_char_id in matches:
self._vim.call('matchdelete',
self._matched_char_id, self._winid)
self._matched_char_id = -1
if self._matched_pattern != '':
self._matched_range_id = self._vim.call(
'matchadd', 'deniteMatchedRange',
r'\c' + regex_convert_py_vim(self._matched_pattern),
10, -1, {'window': self._winid})
matched_char_pattern = '[{}]'.format(re.sub(
r'([\[\]\\^-])',
r'\\\1',
self._context['input'].replace(' ', '')
))
self._matched_char_id = self._vim.call(
'matchadd', 'deniteMatchedChar',
matched_char_pattern,
10, -1, {'window': self._winid})
prev_linenr = self._vim.call('line', '.')
prev_candidate = self._get_cursor_candidate()
buffer = self._vim.buffers[self._bufnr]
buffer.options['modifiable'] = True
self._vim.vars['denite#_candidates'] = [
x['word'] for x in self._candidates]
buffer[:] = self._displayed_texts
buffer.options['modifiable'] = False
self._previous_text = self._context['input']
self._resize_buffer(is_current_buffer)
is_changed = (self._context['reversed'] or
(is_current_buffer and
self._previous_text != self._context['input']))
if self._updated and is_changed:
if not is_current_buffer:
save_winid = self._vim.call('win_getid')
self._vim.call('win_gotoid', self._winid)
self._init_cursor()
self._move_to_pos(self._cursor)
if not is_current_buffer:
self._vim.call('win_gotoid', save_winid)
elif is_current_buffer:
self._vim.call('cursor', [prev_linenr, 0])
if is_current_buffer:
if (self._context['auto_action'] and
prev_candidate != self._get_cursor_candidate()):
self.do_action(self._context['auto_action'])
self._updated = False
self._stop_timer('update_buffer')
def _update_status(self) -> None:
inpt = ''
if self._context['input']:
inpt = self._context['input'] + ' '
if self._context['error_messages']:
inpt = '[ERROR] ' + inpt
path = '[' + self._context['path'] + ']'
status = {
'input': inpt,
'sources': self._statusline_sources,
'path': path,
# Extra
'buffer_name': self._context['buffer_name'],
'line_total': len(self._candidates),
}
if status == self._prev_status:
return
self._bufvars['denite_statusline'] = status
self._prev_status = status
linenr = "printf('%'.(len(line('$'))+2).'d/%d',line('.'),line('$'))"
if self._context['statusline']:
if self._floating or self._filter_floating:
self._vim.options['titlestring'] = (
"%{denite#get_status('input')}%* " +
"%{denite#get_status('sources')} " +
" %{denite#get_status('path')}%*" +
"%{" + linenr + "}%*")
else:
winnr = self._vim.call('win_id2win', self._winid)
self._vim.call('setwinvar', winnr, '&statusline', (
"%#deniteInput#%{denite#get_status('input')}%* " +
"%{denite#get_status('sources')} %=" +
"%#deniteStatusLinePath# %{denite#get_status('path')}%*" +
"%#deniteStatusLineNumber#%{" + linenr + "}%*"))
def _get_display_source_name(self, name: str) -> str:
source_names = self._context['source_names']
if not self._is_multi or source_names == 'hide':
source_name = ''
else:
short_name = (re.sub(r'([a-zA-Z])[a-zA-Z]+', r'\1', name)
if re.search(r'[^a-zA-Z]', name) else name[:2])
source_name = short_name if source_names == 'short' else name
return source_name
def _get_candidate_display_text(self, index: int) -> str:
source_names = self._context['source_names']
candidate = self._candidates[index]
terms = []
if self._is_multi and source_names != 'hide':
terms.append(self._context['max_source_name_format'].format(
self._get_display_source_name(candidate['source_name'])))
encoding = self._context['encoding']
abbr = candidate.get('abbr', candidate['word']).encode(
encoding, errors='replace').decode(encoding, errors='replace')
terms.append(abbr[:int(self._context['max_candidate_width'])])
return (self._context['selected_icon'] # type: ignore
if index in self._selected_candidates
else ' ') + ' '.join(terms).replace('\n', '')
def _get_max_height(self) -> int:
return int(self._vim.options['lines']) if not self._floating else (
int(self._vim.options['lines']) -
int(self._context['winrow']) -
int(self._vim.options['cmdheight']))
def _resize_buffer(self, is_current_buffer: bool) -> None:
split = self._context['split']
if (split == 'no' or split == 'tab' or
self._vim.call('winnr', '$') == 1):
return
winheight = max(self._winheight, 1)
winwidth = max(self._winwidth, 1)
is_vertical = split == 'vertical'
if not is_current_buffer:
restore = self._vim.call('win_getid')
self._vim.call('win_gotoid', self._winid)
if not is_vertical and self._vim.current.window.height != winheight:
if self._floating:
wincol = int(self._context['winrow'])
row = wincol
if split == 'floating':
if self._context['auto_resize'] and row > 1:
row += int(self._context['winheight'])
row -= self._winheight
self._vim.call('nvim_win_set_config', self._winid, {
'relative': 'editor',
'row': row,
'col': int(self._context['wincol']),
'width': winwidth,
'height': winheight,
})
filter_row = 0 if wincol == 1 else row + winheight
filter_col = int(self._context['wincol'])
elif split == 'floating_relative':
init_pos = self._vim.call('nvim_win_get_config',
self._winid)
self._vim.call('nvim_win_set_config', self._winid, {
'relative': 'win',
'win': init_pos['win'],
'row': init_pos['row'],
'col': init_pos['col'],
'width': winwidth,
'height': winheight,
})
filter_col = init_pos['col']
if init_pos['anchor'] == 'NW':
winpos = self._vim.call('nvim_win_get_position',
self._winid)
filter_row = winpos[0] + winheight
filter_winid = self._vim.vars['denite#_filter_winid']
self._context['filter_winrow'] = row
if self._vim.call('win_id2win', filter_winid) > 0:
self._vim.call('nvim_win_set_config', filter_winid, {
'relative': 'editor',
'row': filter_row,
'col': filter_col,
})
self._vim.command('resize ' + str(winheight))
if self._context['reversed']:
self._vim.command('normal! zb')
elif is_vertical and self._vim.current.window.width != winwidth:
self._vim.command('vertical resize ' + str(winwidth))
if not is_current_buffer:
self._vim.call('win_gotoid', restore)
def _check_do_option(self) -> bool:
if self._context['do'] != '':
self._do_command(self._context['do'])
return True
elif (self._candidates and self._context['immediately'] or
len(self._candidates) == 1 and self._context['immediately_1']):
self._do_immediately()
return True
return not (self._context['empty'] or
self._is_async or self._candidates)
def _check_move_option(self) -> None:
if self._context['cursor_pos'].isnumeric():
self._cursor = int(self._context['cursor_pos']) + 1
elif re.match(r'\+\d+', self._context['cursor_pos']):
for _ in range(int(self._context['cursor_pos'][1:])):
self._move_to_next_line()
elif re.match(r'-\d+', self._context['cursor_pos']):
for _ in range(int(self._context['cursor_pos'][1:])):
self._move_to_prev_line()
elif self._context['cursor_pos'] == '$':
self._move_to_last_line()
def _do_immediately(self) -> None:
goto = self._winid > 0 and self._vim.call(
'win_gotoid', self._winid)
if goto:
# Jump to denite window
self._init_buffer()
self.do_action('default')
candidate = self._get_cursor_candidate()
if not candidate:
return
echo(self._vim, 'Normal', '[{}/{}] {}'.format(
self._cursor, len(self._candidates),
candidate.get('abbr', candidate['word'])))
if goto:
# Move to the previous window
self._vim.command('wincmd p')
def _do_command(self, command: str) -> None:
self._init_cursor()
cursor = 1
while cursor < len(self._candidates):
self.do_action('default', command)
self._move_to_next_line()
self._quit_buffer()
def _cleanup(self) -> None:
self._stop_timer('update_candidates')
self._stop_timer('update_buffer')
if self._vim.current.buffer.number == self._bufnr:
self._cursor = self._vim.call('line', '.')
# Note: Close filter window before preview window
self._vim.call('denite#filter#_close_filter_window')
if not self._context['has_preview_window']:
self._vim.command('pclose!')
# Clear previewed buffers
for bufnr in self._vim.vars['denite#_previewed_buffers'].keys():
if not self._vim.call('win_findbuf', bufnr):
self._vim.command('silent bdelete ' + str(bufnr))
self._vim.vars['denite#_previewed_buffers'] = {}
self._vim.command('highlight! link CursorLine CursorLine')
if self._floating or self._filter_floating:
self._vim.options['titlestring'] = self._titlestring
self._vim.options['ruler'] = self._ruler
def _close_current_window(self) -> None:
if self._vim.call('winnr', '$') == 1:
self._vim.command('buffer #')
else:
self._vim.command('close!')
def _quit_buffer(self) -> None:
self._cleanup()
if self._vim.call('bufwinnr', self._bufnr) < 0:
# Denite buffer is already closed
return
winids = self._vim.call('win_findbuf',
self._vim.vars['denite#_filter_bufnr'])
if winids:
# Quit filter buffer
self._vim.call('win_gotoid', winids[0])
self._close_current_window()
# Move to denite window
self._vim.call('win_gotoid', self._winid)
# Restore the window
if self._context['split'] == 'no':
self._switch_prev_buffer()
for k, v in self._save_window_options.items():
self._vim.current.window.options[k] = v
else:
if self._context['split'] == 'tab':
self._vim.command('tabclose!')
if self._context['split'] != 'tab':
self._close_current_window()
self._vim.call('win_gotoid', self._prev_winid)
# Restore the position
self._vim.call('setpos', '.', self._prev_curpos)
if self._get_wininfo() and self._get_wininfo() == self._prev_wininfo:
# Note: execute restcmd twice to restore layout properly
self._vim.command(self._winrestcmd)
self._vim.command(self._winrestcmd)
clearmatch(self._vim)
def _get_cursor_candidate(self) -> Candidate:
return self._get_candidate(self._cursor)
def _get_candidate(self, pos: int) -> Candidate:
if not self._candidates or pos > len(self._candidates):
return {}
return self._candidates[pos - 1]
def _get_selected_candidates(self) -> Candidates:
if not self._selected_candidates:
return [self._get_cursor_candidate()
] if self._get_cursor_candidate() else []
return [self._candidates[x] for x in self._selected_candidates]
def _init_denite(self) -> None:
if self._denite:
self._denite.start(self._context)
self._denite.on_init(self._context)
self._initialized = True
self._winheight = int(self._context['winheight'])
self._winwidth = int(self._context['winwidth'])
def _gather_candidates(self) -> None:
self._selected_candidates = []
if self._denite:
self._denite.gather_candidates(self._context)
def _init_cursor(self) -> None:
if self._context['reversed']:
self._move_to_last_line()
self._vim.command('normal! zb')
else:
self._move_to_first_line()
def _move_to_pos(self, pos: int) -> None:
self._vim.call('cursor', pos, 0)
self._cursor = pos
def _move_to_next_line(self) -> None:
if self._cursor < len(self._candidates):
self._cursor += 1
def _move_to_prev_line(self) -> None:
if self._cursor >= 1:
self._cursor -= 1
def _move_to_first_line(self) -> None:
self._cursor = 1
def _move_to_last_line(self) -> None:
self._cursor = len(self._candidates)
def _start_timer(self, key: str) -> None:
if key in self._timers:
return
if key == 'update_candidates':
self._timers[key] = self._vim.call(
'denite#helper#_start_update_candidates_timer', self._bufnr)
elif key == 'update_buffer':
self._timers[key] = self._vim.call(
'denite#helper#_start_update_buffer_timer', self._bufnr)
def _stop_timer(self, key: str) -> None:
if key not in self._timers:
return
self._vim.call('timer_stop', self._timers[key])
# Note: After timer_stop is called, self._timers may be removed
if key in self._timers:
self._timers.pop(key)
| 38.211329 | 79 | 0.54852 | [
"MIT"
] | supermomonga/denite.nvim | rplugin/python3/denite/ui/default.py | 35,078 | Python |
"""
"""
from __future__ import division
from torch.optim.optimizer import Optimizer, required
import numpy as np
import torch
from typing import NamedTuple, List
from dataclasses import dataclass
from enum import Enum
from typing import Union, Tuple
# from scipy.sparse.linalg import svds
from scipy.optimize import minimize_scalar
class LayerType(Enum):
CONV = 1
FC = 2
NON_CONV = 3
@dataclass
class LayerMetrics:
rank: float
KG: float
condition: float
@dataclass
class ConvLayerMetrics:
input_channel: LayerMetrics
output_channel: LayerMetrics
class LRMetrics(NamedTuple):
rank_velocity: List[float]
r_conv: List[float]
def EVBMF(Y, sigma2=None, H=None):
"""Implementation of the analytical solution to Empirical Variational
Bayes Matrix Factorization.
This function can be used to calculate the analytical solution to
empirical VBMF.
This is based on the paper and MatLab code by Nakajima et al.:
"Global analytic solution of fully-observed variational Bayesian matrix
factorization."
Notes
-----
If sigma2 is unspecified, it is estimated by minimizing the free
energy.
If H is unspecified, it is set to the smallest of the sides of the
input Y.
Attributes
----------
Y : numpy-array
Input matrix that is to be factorized. Y has shape (L,M), where L<=M.
sigma2 : int or None (default=None)
Variance of the noise on Y.
H : int or None (default = None)
Maximum rank of the factorized matrices.
Returns
-------
U : numpy-array
Left-singular vectors.
S : numpy-array
Diagonal matrix of singular values.
V : numpy-array
Right-singular vectors.
post : dictionary
Dictionary containing the computed posterior values.
References
----------
.. [1] Nakajima, Shinichi, et al. "Global analytic solution of
fully-observed variational Bayesian matrix factorization." Journal of
Machine Learning Research 14.Jan (2013): 1-37.
.. [2] Nakajima, Shinichi, et al. "Perfect dimensionality recovery by
variational Bayesian PCA." Advances in Neural Information Processing
Systems. 2012.
"""
L, M = Y.shape # has to be L<=M
if H is None:
H = L
alpha = L / M
tauubar = 2.5129 * np.sqrt(alpha)
# SVD of the input matrix, max rank of H
# U, s, V = np.linalg.svd(Y)
U, s, V = torch.svd(Y)
U = U[:, :H]
s = s[:H]
V = V[:H].T
# Calculate residual
residual = 0.
if H < L:
# residual = np.sum(np.sum(Y**2)-np.sum(s**2))
residual = torch.sum(np.sum(Y**2) - np.sum(s**2))
# Estimation of the variance when sigma2 is unspecified
if sigma2 is None:
xubar = (1 + tauubar) * (1 + alpha / tauubar)
eH_ub = int(np.min([np.ceil(L / (1 + alpha)) - 1, H])) - 1
# upper_bound = (np.sum(s**2)+residual)/(L*M)
# lower_bound = np.max(
# [s[eH_ub+1]**2/(M*xubar), np.mean(s[eH_ub+1:]**2)/M])
upper_bound = (torch.sum(s**2) + residual) / (L * M)
lower_bound = torch.max(torch.stack(
[s[eH_ub + 1]**2 / (M * xubar), torch.mean(s[eH_ub + 1:]**2) / M], dim=0))
scale = 1. # /lower_bound
s = s * np.sqrt(scale)
residual = residual * scale
lower_bound = lower_bound * scale
upper_bound = upper_bound * scale
sigma2_opt = minimize_scalar(
EVBsigma2, args=(L, M, s.cpu().numpy(), residual, xubar),
bounds=[lower_bound.cpu().numpy(), upper_bound.cpu().numpy()],
method='Bounded')
sigma2 = sigma2_opt.x
# Threshold gamma term
threshold = np.sqrt(M * sigma2 * (1 + tauubar) * (1 + alpha / tauubar))
# pos = np.sum(s > threshold)
pos = torch.sum(s > threshold)
# Formula (15) from [2]
# d = torch.multiply(s[:pos]/2,
# 1-torch.divide(
# torch.tensor((L+M)*sigma2, device=s.device),
# s[:pos]**2) + torch.sqrt((1-torch.divide(
# torch.tensor(
# (L+M)*sigma2, device=s.device),
# s[:pos]**2))**2 -
# 4*L*M*sigma2**2/s[:pos]**4))
# d = np.multiply(s[:pos]/2, 1-np.divide((L+M)*sigma2, s[:pos]**2) + np.sqrt(
# (1-np.divide((L+M)*sigma2, s[:pos]**2))**2 - 4*L*M*sigma2**2/s[:pos]**4))
d = (s[:pos] / 2) * (1 - (L + M) * sigma2 / s[:pos]**2
+ torch.sqrt((1 -
(L + M) * sigma2 / s[:pos]**2)**2 - 4 * L * M * sigma2**2 / s[:pos]**4))
# Computation of the posterior
# post = {}
# post['ma'] = np.zeros(H)
# post['mb'] = np.zeros(H)
# post['sa2'] = np.zeros(H)
# post['sb2'] = np.zeros(H)
# post['cacb'] = np.zeros(H)
# tau = np.multiply(d, s[:pos])/(M*sigma2)
# delta = np.multiply(np.sqrt(np.divide(M*d, L*s[:pos])), 1+alpha/tau)
# post['ma'][:pos] = np.sqrt(np.multiply(d, delta))
# post['mb'][:pos] = np.sqrt(np.divide(d, delta))
# post['sa2'][:pos] = np.divide(sigma2*delta, s[:pos])
# post['sb2'][:pos] = np.divide(sigma2, np.multiply(delta, s[:pos]))
# post['cacb'][:pos] = np.sqrt(np.multiply(d, s[:pos])/(L*M))
# post['sigma2'] = sigma2
# post['F'] = 0.5*(L*M*np.log(2*np.pi*sigma2) +
# (residual+np.sum(s**2))/sigma2 + np.sum(
# M*np.log(tau+1) + L*np.log(tau/alpha + 1) - M*tau))
return U[:, :pos], torch.diag(d), V[:, :pos] # , post
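# Illustrative usage sketch for EVBMF (an addition for clarity, not part of the
# original implementation); the matrix size below is arbitrary, the only
# requirement documented above is a 2-D tensor with L <= M.
def _evbmf_usage_example():
    Y = torch.randn(32, 128)
    U_approx, S_approx, V_approx = EVBMF(Y)
    # S_approx is diagonal, so its first dimension is the estimated rank of Y
    # under the empirical VB criterion.
    return S_approx.shape[0]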
def EVBsigma2(sigma2, L, M, s, residual, xubar):
H = len(s)
alpha = L / M
x = s**2 / (M * sigma2)
z1 = x[x > xubar]
z2 = x[x <= xubar]
tau_z1 = tau(z1, alpha)
term1 = np.sum(z2 - np.log(z2))
term2 = np.sum(z1 - tau_z1)
term3 = np.sum(np.log(np.divide(tau_z1 + 1, z1)))
term4 = alpha * np.sum(np.log(tau_z1 / alpha + 1))
obj = term1 + term2 + term3 + term4 + residual / (M * sigma2) + (L - H) * np.log(sigma2)
return obj
def phi0(x):
return x - np.log(x)
def phi1(x, alpha):
return np.log(tau(x, alpha) + 1) + alpha * np.log(tau(x, alpha) / alpha + 1
) - tau(x, alpha)
def tau(x, alpha):
return 0.5 * (x - (1 + alpha) + np.sqrt((x - (1 + alpha))**2 - 4 * alpha))
class Metrics:
def __init__(self, params, linear: bool = False) -> None:
'''
parameters: list of torch.nn.Module.parameters()
'''
self.params = params
self.history = list()
mask = list()
for param_idx, param in enumerate(params):
param_shape = param.shape
if not linear:
if len(param_shape) != 4:
mask.append(param_idx)
else:
if len(param_shape) != 4 and len(param_shape) != 2:
mask.append(param_idx)
self.mask = set(mask)
def compute_low_rank(self,
tensor: torch.Tensor,
normalizer: float) -> torch.Tensor:
if tensor.requires_grad:
tensor = tensor.detach()
try:
tensor_size = tensor.shape
if tensor_size[0] > tensor_size[1]:
tensor = tensor.T
U_approx, S_approx, V_approx = EVBMF(tensor)
except RuntimeError:
return None, None, None
rank = S_approx.shape[0] / tensor_size[0] # normalizer
low_rank_eigen = torch.diag(S_approx).data.cpu().numpy()
if len(low_rank_eigen) != 0:
condition = low_rank_eigen[0] / low_rank_eigen[-1]
sum_low_rank_eigen = low_rank_eigen / \
max(low_rank_eigen)
sum_low_rank_eigen = np.sum(sum_low_rank_eigen)
else:
condition = 0
sum_low_rank_eigen = 0
KG = sum_low_rank_eigen / tensor_size[0] # normalizer
return rank, KG, condition
def KG(self, epoch: int) -> np.ndarray:
KG_list = list()
for i, (index, metric) in enumerate(self.history[epoch]):
if isinstance(metric, ConvLayerMetrics):
KG_list.append((metric.input_channel.KG
+ metric.output_channel.KG) / 2)
elif isinstance(metric, LayerMetrics):
KG_list.append(metric.KG)
return np.array(KG_list)
def __call__(self) -> List[Tuple[int, Union[LayerMetrics,
ConvLayerMetrics]]]:
'''
Computes the knowledge gain (S) and mapping condition (condition)
'''
metrics: List[Tuple[int, Union[LayerMetrics,
ConvLayerMetrics]]] = list()
for layer_index, layer in enumerate(self.params):
if layer_index in self.mask:
metrics.append((layer_index, None))
continue
# if np.less(np.prod(layer.shape), 10_000):
# metrics.append((layer_index, None))
if len(layer.shape) == 4:
layer_tensor = layer.data
tensor_size = layer_tensor.shape
mode_3_unfold = layer_tensor.permute(1, 0, 2, 3)
mode_3_unfold = torch.reshape(
mode_3_unfold, [tensor_size[1], tensor_size[0]
* tensor_size[2] * tensor_size[3]])
mode_4_unfold = layer_tensor
mode_4_unfold = torch.reshape(
mode_4_unfold, [tensor_size[0], tensor_size[1]
* tensor_size[2] * tensor_size[3]])
in_rank, in_KG, in_condition = self.compute_low_rank(
mode_3_unfold, tensor_size[1])
if in_rank is None and in_KG is None and in_condition is None:
if len(self.history) > 0:
in_rank = self.history[-1][
layer_index][1].input_channel.rank
in_KG = self.history[-1][
layer_index][1].input_channel.KG
in_condition = self.history[-1][
layer_index][1].input_channel.condition
else:
in_rank = in_KG = in_condition = 0.
out_rank, out_KG, out_condition = self.compute_low_rank(
mode_4_unfold, tensor_size[0])
if out_rank is None and out_KG is None and out_condition is None:
if len(self.history) > 0:
out_rank = self.history[-1][
layer_index][1].output_channel.rank
out_KG = self.history[-1][
layer_index][1].output_channel.KG
out_condition = self.history[-1][
layer_index][1].output_channel.condition
else:
out_rank = out_KG = out_condition = 0.
metrics.append((layer_index, ConvLayerMetrics(
input_channel=LayerMetrics(
rank=in_rank,
KG=in_KG,
condition=in_condition),
output_channel=LayerMetrics(
rank=out_rank,
KG=out_KG,
condition=out_condition))))
elif len(layer.shape) == 2:
rank, KG, condition = self.compute_low_rank(
layer, layer.shape[0])
if rank is None and KG is None and condition is None:
if len(self.history) > 0:
rank = self.history[-1][layer_index][1].rank
KG = self.history[-1][layer_index][1].KG
condition = self.history[-1][layer_index][1].condition
else:
rank = KG = condition = 0.
metrics.append((layer_index, LayerMetrics(
rank=rank,
KG=KG,
condition=condition)))
else:
metrics.append((layer_index, None))
self.history.append(metrics)
return metrics
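# Rough usage sketch for Metrics (illustrative only; `model` is any
# torch.nn.Module supplied by the caller). Per the constructor above, `params`
# is simply the list produced by model.parameters().
def _metrics_usage_example(model):
    m = Metrics(params=list(model.parameters()), linear=False)
    per_layer = m()     # list of (layer_index, LayerMetrics / ConvLayerMetrics / None)
    kg = m.KG(epoch=0)  # knowledge-gain array for the epoch just recorded
    return per_layer, kg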
class Adas(Optimizer):
"""
Vectorized SGD from torch.optim.SGD
"""
def __init__(self,
params,
lr: float = required,
beta: float = 0.8,
step_size: int = None,
linear: bool = True,
gamma: float = 1,
momentum: float = 0,
dampening: float = 0,
weight_decay: float = 0,
nesterov: bool = False):
if lr is not required and lr < 0.0:
raise ValueError("Invalid learning rate: {}".format(lr))
if momentum < 0.0:
raise ValueError("Invalid momentum value: {}".format(momentum))
if weight_decay < 0.0:
raise ValueError(
"Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
weight_decay=weight_decay, nesterov=nesterov)
if nesterov and (momentum <= 0 or dampening != 0):
raise ValueError(
"Nesterov momentum requires a momentum and zero dampening")
super(Adas, self).__init__(params[:2], defaults)
# Adas Specific stuff (not SGD)
if np.less(beta, 0) or np.greater_equal(beta, 1):
raise ValueError(f'Invalid beta: {beta}')
if np.less(gamma, 0):
raise ValueError(f'Invalid gamma: {gamma}')
if step_size is not None:
if np.less_equal(step_size, 0):
raise ValueError(f'Invalid step_size: {step_size}')
self.step_size = step_size
self.gamma = gamma
self.beta = beta
self.metrics = metrics = Metrics(params=params[2]["all_params"], linear=linear)
self.lr_vector = np.repeat(a=lr, repeats=len(metrics.params))
self.velocity = np.zeros(
len(self.metrics.params) - len(self.metrics.mask))
self.not_ready = list(range(len(self.velocity)))
self.init_lr = lr
self.zeta = 1.
self.KG = 0.
def __setstate__(self, state):
super(Adas, self).__setstate__(state)
for group in self.param_groups:
group.setdefault('nesterov', False)
def epoch_step(self, epoch: int) -> None:
self.metrics()
if epoch == 0:
velocity = self.init_lr * np.ones(len(self.velocity))
self.KG = self.metrics.KG(epoch)
else:
KG = self.metrics.KG(epoch)
velocity = KG - self.KG
self.KG = KG
for idx in self.not_ready:
if np.isclose(KG[idx], 0.):
velocity[idx] = self.init_lr - \
self.beta * self.velocity[idx]
else:
self.not_ready.remove(idx)
if self.step_size is not None:
if epoch % self.step_size == 0 and epoch > 0:
self.lr_vector *= self.gamma
self.zeta *= self.gamma
self.velocity = np.maximum(
self.beta * self.velocity + self.zeta * velocity, 0.)
count = 0
for i in range(len(self.metrics.params)):
if i in self.metrics.mask:
self.lr_vector[i] = self.lr_vector[i - (1 if i > 0 else 0)]
else:
self.lr_vector[i] = self.velocity[count]
count += 1
def step(self, closure: callable = None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
iteration_group = 0
for group in self.param_groups:
iteration_group += 1
weight_decay = group['weight_decay']
momentum = group['momentum']
dampening = group['dampening']
nesterov = group['nesterov']
for p_index, p in enumerate(group['params']):
if p.grad is None:
continue
d_p = p.grad.data
if weight_decay != 0:
d_p.add_(p.data, alpha=weight_decay)
if momentum != 0:
param_state = self.state[p]
if 'momentum_buffer' not in param_state:
buf = param_state['momentum_buffer'] = torch.clone(
d_p).detach()
else:
buf = param_state['momentum_buffer']
buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
if nesterov:
d_p = d_p.add(momentum, buf)
else:
d_p = buf
# p.data.add_(-group['lr'], d_p)
p.data.add_(d_p, alpha=-self.lr_vector[p_index])
return loss
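# Hedged construction sketch (not part of the original module). Reading the
# constructor above, `params` is a 3-element sequence: two ordinary optimizer
# param groups followed by a dict holding every parameter under "all_params";
# the backbone/head split below is purely illustrative.
def _adas_usage_example(backbone, head, lr=0.03):
    all_params = list(backbone.parameters()) + list(head.parameters())
    groups = [
        {'params': list(backbone.parameters())},
        {'params': list(head.parameters())},
        {'all_params': all_params},
    ]
    optimizer = Adas(groups, lr=lr, beta=0.8)
    # Typical loop: optimizer.epoch_step(epoch) once per epoch to refresh the
    # per-layer lr vector, optimizer.step() after each backward pass.
    return optimizer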
| 36.314346 | 111 | 0.521815 | [
"Apache-2.0"
] | MathieuTuli/transformers | src/transformers/adas.py | 17,213 | Python |
"""Unit tests for tftpy."""
import unittest
import logging
import tftpy
import os
import time
import threading
from errno import EINTR
from multiprocessing import Queue
log = tftpy.log
class TestTftpyClasses(unittest.TestCase):
def setUp(self):
tftpy.setLogLevel(logging.DEBUG)
def testTftpPacketRRQ(self):
log.debug("===> Running testcase testTftpPacketRRQ")
options = {}
rrq = tftpy.TftpPacketRRQ()
rrq.filename = 'myfilename'
rrq.mode = 'octet'
rrq.options = options
rrq.encode()
self.assert_(rrq.buffer != None, "Buffer populated")
rrq.decode()
self.assertEqual(rrq.filename, b"myfilename", "Filename correct")
self.assertEqual(rrq.mode, b"octet", "Mode correct")
self.assertEqual(rrq.options, options, "Options correct")
# repeat test with options
rrq.options = { 'blksize': '1024' }
rrq.filename = 'myfilename'
rrq.mode = 'octet'
rrq.encode()
self.assert_(rrq.buffer != None, "Buffer populated")
rrq.decode()
self.assertEqual(rrq.filename, b"myfilename", "Filename correct")
self.assertEqual(rrq.mode, b"octet", "Mode correct")
self.assertEqual(rrq.options['blksize'], '1024', "Blksize correct")
def testTftpPacketWRQ(self):
log.debug("===> Running test case testTftpPacketWRQ")
options = {}
wrq = tftpy.TftpPacketWRQ()
wrq.filename = 'myfilename'
wrq.mode = 'octet'
wrq.options = options
wrq.encode()
self.assert_(wrq.buffer != None, "Buffer populated")
wrq.decode()
self.assertEqual(wrq.opcode, 2, "Opcode correct")
self.assertEqual(wrq.filename, b"myfilename", "Filename correct")
self.assertEqual(wrq.mode, b"octet", "Mode correct")
self.assertEqual(wrq.options, options, "Options correct")
# repeat test with options
wrq.options = { 'blksize': '1024' }
wrq.filename = 'myfilename'
wrq.mode = 'octet'
wrq.encode()
self.assert_(wrq.buffer != None, "Buffer populated")
wrq.decode()
self.assertEqual(wrq.opcode, 2, "Opcode correct")
self.assertEqual(wrq.filename, b"myfilename", "Filename correct")
self.assertEqual(wrq.mode, b"octet", "Mode correct")
self.assertEqual(wrq.options['blksize'], '1024', "Blksize correct")
def testTftpPacketDAT(self):
log.debug("===> Running testcase testTftpPacketDAT")
dat = tftpy.TftpPacketDAT()
dat.blocknumber = 5
data = "this is some data"
dat.data = data
dat.encode()
self.assert_(dat.buffer != None, "Buffer populated")
dat.decode()
self.assertEqual(dat.opcode, 3, "DAT opcode is correct")
self.assertEqual(dat.blocknumber, 5, "Block number is correct")
self.assertEqual(dat.data, data, "DAT data is correct")
def testTftpPacketACK(self):
log.debug("===> Running testcase testTftpPacketACK")
ack = tftpy.TftpPacketACK()
ack.blocknumber = 6
ack.encode()
self.assert_(ack.buffer != None, "Buffer populated")
ack.decode()
self.assertEqual(ack.opcode, 4, "ACK opcode is correct")
self.assertEqual(ack.blocknumber, 6, "ACK blocknumber correct")
def testTftpPacketERR(self):
log.debug("===> Running testcase testTftpPacketERR")
err = tftpy.TftpPacketERR()
err.errorcode = 4
err.encode()
self.assert_(err.buffer != None, "Buffer populated")
err.decode()
self.assertEqual(err.opcode, 5, "ERR opcode is correct")
self.assertEqual(err.errorcode, 4, "ERR errorcode is correct")
def testTftpPacketOACK(self):
log.debug("===> Running testcase testTftpPacketOACK")
oack = tftpy.TftpPacketOACK()
# Test that if we make blksize a number, it comes back a string.
oack.options = { 'blksize': 2048 }
oack.encode()
self.assert_(oack.buffer != None, "Buffer populated")
oack.decode()
self.assertEqual(oack.opcode, 6, "OACK opcode is correct")
self.assertEqual(oack.options['blksize'],
'2048',
"OACK blksize option is correct")
# Test string to string
oack.options = { 'blksize': '4096' }
oack.encode()
self.assert_(oack.buffer != None, "Buffer populated")
oack.decode()
self.assertEqual(oack.opcode, 6, "OACK opcode is correct")
self.assertEqual(oack.options['blksize'],
'4096',
"OACK blksize option is correct")
def testTftpPacketFactory(self):
log.debug("===> Running testcase testTftpPacketFactory")
# Make sure that the correct class is created for the correct opcode.
classes = {
1: tftpy.TftpPacketRRQ,
2: tftpy.TftpPacketWRQ,
3: tftpy.TftpPacketDAT,
4: tftpy.TftpPacketACK,
5: tftpy.TftpPacketERR,
6: tftpy.TftpPacketOACK
}
factory = tftpy.TftpPacketFactory()
for opcode in classes:
self.assert_(isinstance(factory._TftpPacketFactory__create(opcode),
classes[opcode]),
"opcode %d returns the correct class" % opcode)
class TestTftpyState(unittest.TestCase):
def setUp(self):
tftpy.setLogLevel(logging.DEBUG)
def clientServerUploadOptions(self,
options,
input=None,
transmitname=None,
server_kwargs=None):
"""Fire up a client and a server and do an upload."""
root = '/tmp'
home = os.path.dirname(os.path.abspath(__file__))
filename = '640KBFILE'
input_path = os.path.join(home, filename)
if not input:
input = input_path
if transmitname:
filename = transmitname
server_kwargs = server_kwargs or {}
server = tftpy.TftpServer(root, **server_kwargs)
client = tftpy.TftpClient('localhost',
20001,
options)
# Fork a server and run the client in this process.
child_pid = os.fork()
if child_pid:
# parent - let the server start
try:
time.sleep(1)
client.upload(filename,
input)
finally:
os.kill(child_pid, 15)
os.waitpid(child_pid, 0)
else:
server.listen('localhost', 20001)
def clientServerDownloadOptions(self, options, output='/tmp/out'):
"""Fire up a client and a server and do a download."""
root = os.path.dirname(os.path.abspath(__file__))
server = tftpy.TftpServer(root)
client = tftpy.TftpClient('localhost',
20001,
options)
# Fork a server and run the client in this process.
child_pid = os.fork()
if child_pid:
# parent - let the server start
try:
time.sleep(1)
client.download('640KBFILE',
output)
finally:
os.kill(child_pid, 15)
os.waitpid(child_pid, 0)
else:
server.listen('localhost', 20001)
def testClientServerNoOptions(self):
self.clientServerDownloadOptions({})
def testClientServerTsizeOptions(self):
self.clientServerDownloadOptions({'tsize': 64*1024})
def testClientFileObject(self):
output = open('/tmp/out', 'w')
self.clientServerDownloadOptions({}, output)
def testClientServerBlksize(self):
for blksize in [512, 1024, 2048, 4096]:
self.clientServerDownloadOptions({'blksize': blksize})
def testClientServerUploadNoOptions(self):
self.clientServerUploadOptions({})
def testClientServerUploadFileObj(self):
fileobj = open('t/640KBFILE', 'r')
self.clientServerUploadOptions({}, input=fileobj)
def testClientServerUploadWithSubdirs(self):
self.clientServerUploadOptions({}, transmitname='foo/bar/640KBFILE')
def testClientServerUploadStartingSlash(self):
self.clientServerUploadOptions({}, transmitname='/foo/bar/640KBFILE')
def testClientServerUploadOptions(self):
for blksize in [512, 1024, 2048, 4096]:
self.clientServerUploadOptions({'blksize': blksize})
def customUploadHelper(self, return_func):
q = Queue()
def upload_open(path, context):
q.put('called')
return return_func(path)
self.clientServerUploadOptions(
{},
server_kwargs={'upload_open': upload_open})
self.assertEqual(q.get(True, 1), 'called')
def testClientServerUploadCustomOpen(self):
self.customUploadHelper(lambda p: open(p, 'wb'))
def testClientServerUploadCustomOpenForbids(self):
with self.assertRaisesRegexp(tftpy.TftpException, 'Access violation'):
self.customUploadHelper(lambda p: None)
def testClientServerUploadTsize(self):
self.clientServerUploadOptions({'tsize': 64*1024}, transmitname='/foo/bar/640KBFILE')
def testClientServerNoOptionsDelay(self):
tftpy.TftpStates.DELAY_BLOCK = 10
self.clientServerDownloadOptions({})
tftpy.TftpStates.DELAY_BLOCK = 0
def testServerNoOptions(self):
raddress = '127.0.0.2'
rport = 10000
timeout = 5
root = os.path.dirname(os.path.abspath(__file__))
# Testing without the dyn_func_file set.
serverstate = tftpy.TftpContextServer(raddress,
rport,
timeout,
root)
self.assertTrue( isinstance(serverstate,
tftpy.TftpContextServer) )
rrq = tftpy.TftpPacketRRQ()
rrq.filename = '640KBFILE'
rrq.mode = 'octet'
rrq.options = {}
# Start the download.
serverstate.start(rrq.encode().buffer)
# At a 512 byte blocksize, this should be 1280 packets exactly.
for block in range(1, 1281):
# Should be in expectack state.
self.assertTrue( isinstance(serverstate.state,
tftpy.TftpStateExpectACK) )
ack = tftpy.TftpPacketACK()
ack.blocknumber = block % 65536
serverstate.state = serverstate.state.handle(ack, raddress, rport)
# The last DAT packet should be empty, indicating a completed
# transfer.
ack = tftpy.TftpPacketACK()
ack.blocknumber = 1281 % 65536
finalstate = serverstate.state.handle(ack, raddress, rport)
self.assertTrue( finalstate is None )
def testServerNoOptionsSubdir(self):
raddress = '127.0.0.2'
rport = 10000
timeout = 5
root = os.path.dirname(os.path.abspath(__file__))
# Testing without the dyn_func_file set.
serverstate = tftpy.TftpContextServer(raddress,
rport,
timeout,
root)
self.assertTrue( isinstance(serverstate,
tftpy.TftpContextServer) )
rrq = tftpy.TftpPacketRRQ()
rrq.filename = '640KBFILE'
rrq.mode = 'octet'
rrq.options = {}
# Start the download.
serverstate.start(rrq.encode().buffer)
# At a 512 byte blocksize, this should be 1280 packets exactly.
for block in range(1, 1281):
# Should be in expectack state, or None
self.assertTrue( isinstance(serverstate.state,
tftpy.TftpStateExpectACK) )
ack = tftpy.TftpPacketACK()
ack.blocknumber = block % 65536
serverstate.state = serverstate.state.handle(ack, raddress, rport)
# The last DAT packet should be empty, indicating a completed
# transfer.
ack = tftpy.TftpPacketACK()
ack.blocknumber = 1281 % 65536
finalstate = serverstate.state.handle(ack, raddress, rport)
self.assertTrue( finalstate is None )
def testServerInsecurePath(self):
raddress = '127.0.0.2'
rport = 10000
timeout = 5
root = os.path.dirname(os.path.abspath(__file__))
serverstate = tftpy.TftpContextServer(raddress,
rport,
timeout,
root)
rrq = tftpy.TftpPacketRRQ()
rrq.filename = '../setup.py'
rrq.mode = 'octet'
rrq.options = {}
# Start the download.
self.assertRaises(tftpy.TftpException,
serverstate.start, rrq.encode().buffer)
def testServerSecurePath(self):
raddress = '127.0.0.2'
rport = 10000
timeout = 5
root = os.path.dirname(os.path.abspath(__file__))
serverstate = tftpy.TftpContextServer(raddress,
rport,
timeout,
root)
rrq = tftpy.TftpPacketRRQ()
rrq.filename = '640KBFILE'
rrq.mode = 'octet'
rrq.options = {}
# Start the download.
serverstate.start(rrq.encode().buffer)
# Should be in expectack state.
self.assertTrue(isinstance(serverstate.state,
tftpy.TftpStateExpectACK))
def testServerDownloadWithStopNow(self, output='/tmp/out'):
log.debug("===> Running testcase testServerDownloadWithStopNow")
root = os.path.dirname(os.path.abspath(__file__))
server = tftpy.TftpServer(root)
client = tftpy.TftpClient('localhost',
20001,
{})
# Fork a server and run the client in this process.
child_pid = os.fork()
if child_pid:
try:
# parent - let the server start
stopped_early = False
time.sleep(1)
def delay_hook(pkt):
time.sleep(0.005) # 5ms
client.download('640KBFILE', output, delay_hook)
except:
log.warn("client threw exception as expected")
stopped_early = True
finally:
os.kill(child_pid, 15)
os.waitpid(child_pid, 0)
            self.assertTrue( stopped_early == True,
                             "Server should exit early" )
else:
import signal
def handlealarm(signum, frame):
server.stop(now=True)
signal.signal(signal.SIGALRM, handlealarm)
signal.alarm(2)
try:
server.listen('localhost', 20001)
log.error("server didn't throw exception")
except Exception as err:
log.error("server got unexpected exception %s" % err)
# Wait until parent kills us
while True:
time.sleep(1)
def testServerDownloadWithStopNotNow(self, output='/tmp/out'):
log.debug("===> Running testcase testServerDownloadWithStopNotNow")
root = os.path.dirname(os.path.abspath(__file__))
server = tftpy.TftpServer(root)
client = tftpy.TftpClient('localhost',
20001,
{})
# Fork a server and run the client in this process.
child_pid = os.fork()
if child_pid:
try:
stopped_early = True
# parent - let the server start
time.sleep(1)
def delay_hook(pkt):
time.sleep(0.005) # 5ms
client.download('640KBFILE', output, delay_hook)
stopped_early = False
except:
log.warn("client threw exception as expected")
finally:
os.kill(child_pid, 15)
os.waitpid(child_pid, 0)
self.assertTrue( stopped_early == False,
"Server should not exit early" )
else:
import signal
def handlealarm(signum, frame):
server.stop(now=False)
signal.signal(signal.SIGALRM, handlealarm)
signal.alarm(2)
try:
server.listen('localhost', 20001)
except Exception as err:
log.error("server threw exception %s" % err)
# Wait until parent kills us
while True:
time.sleep(1)
def testServerDownloadWithDynamicPort(self, output='/tmp/out'):
log.debug("===> Running testcase testServerDownloadWithDynamicPort")
root = os.path.dirname(os.path.abspath(__file__))
server = tftpy.TftpServer(root)
server_thread = threading.Thread(target=server.listen,
kwargs={'listenip': 'localhost',
'listenport': 0})
server_thread.start()
try:
server.is_running.wait()
client = tftpy.TftpClient('localhost', server.listenport, {})
time.sleep(1)
client.download('640KBFILE',
output)
finally:
server.stop(now=False)
server_thread.join()
class TestTftpyLoggers(unittest.TestCase):
def setUp(self):
tftpy.setLogLevel(logging.DEBUG)
def testStreamLogger(self):
# Not sure how best to test this. Maybe configure the loggers and look
# for any complaints.
try:
tftpy.addHandler(tftpy.create_streamhandler())
self.assertTrue( True )
except:
self.assertTrue( False )
def testFileLogger(self):
# Same as previous.
try:
tftpy.addHandler(tftpy.create_rotatingfilehandler('/tmp/log'))
self.assertTrue( True )
except:
self.assertTrue( False )
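# Standalone usage sketch distilled from the helpers above (illustrative only;
# host, port and file names are placeholders). A server process would run
# tftpy.TftpServer('/srv/tftp').listen('0.0.0.0', 20001), while a client does:
def _example_client_transfer():
    client = tftpy.TftpClient('localhost', 20001, {'blksize': 1024})
    client.download('640KBFILE', '/tmp/out')  # fetch a file from the server
    client.upload('640KBFILE', '/tmp/out')    # push the local copy back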
if __name__ == '__main__':
unittest.main()
| 37.277445 | 93 | 0.557775 | [
"MIT"
] | mapcollab/python-tftpy | t/test.py | 18,676 | Python |
r"""
Early Stopping
^^^^^^^^^^^^^^
Monitor a validation metric and stop training when it stops improving.
"""
from copy import deepcopy
import numpy as np
import torch
import torch.distributed as dist
from pytorch_lightning import _logger as log
from pytorch_lightning.callbacks.base import Callback
from pytorch_lightning.utilities import rank_zero_warn
torch_inf = torch.tensor(np.Inf)
try:
import torch_xla
import torch_xla.core.xla_model as xm
except ImportError:
XLA_AVAILABLE = False
else:
XLA_AVAILABLE = True
class EarlyStopping(Callback):
r"""
Args:
monitor: quantity to be monitored. Default: ``'val_loss'``.
.. note:: Has no effect when using `EvalResult` or `TrainResult`
min_delta: minimum change in the monitored quantity
to qualify as an improvement, i.e. an absolute
change of less than `min_delta`, will count as no
improvement. Default: ``0.0``.
patience: number of validation epochs with no improvement
after which training will be stopped. Default: ``3``.
verbose: verbosity mode. Default: ``False``.
mode: one of {auto, min, max}. In `min` mode,
training will stop when the quantity
monitored has stopped decreasing; in `max`
mode it will stop when the quantity
monitored has stopped increasing; in `auto`
mode, the direction is automatically inferred
from the name of the monitored quantity. Default: ``'auto'``.
strict: whether to crash the training if `monitor` is
not found in the validation metrics. Default: ``True``.
Example::
>>> from pytorch_lightning import Trainer
>>> from pytorch_lightning.callbacks import EarlyStopping
>>> early_stopping = EarlyStopping('val_loss')
>>> trainer = Trainer(early_stop_callback=early_stopping)
"""
mode_dict = {
'min': torch.lt,
'max': torch.gt,
}
def __init__(self, monitor: str = 'val_loss', min_delta: float = 0.0, patience: int = 3,
verbose: bool = False, mode: str = 'auto', strict: bool = True):
super().__init__()
self.monitor = monitor
self.patience = patience
self.verbose = verbose
self.strict = strict
self.min_delta = min_delta
self.wait_count = 0
self.stopped_epoch = 0
self.mode = mode
if mode not in self.mode_dict:
if self.verbose > 0:
log.info(f'EarlyStopping mode {mode} is unknown, fallback to auto mode.')
self.mode = 'auto'
if self.mode == 'auto':
if self.monitor == 'acc':
self.mode = 'max'
else:
self.mode = 'min'
if self.verbose > 0:
log.info(f'EarlyStopping mode set to {self.mode} for monitoring {self.monitor}.')
self.min_delta *= 1 if self.monitor_op == torch.gt else -1
self.best_score = torch_inf if self.monitor_op == torch.lt else -torch_inf
def _validate_condition_metric(self, logs):
monitor_val = logs.get(self.monitor)
error_msg = (f'Early stopping conditioned on metric `{self.monitor}`'
f' which is not available. Either add `{self.monitor}` to the return of '
f' validation_epoch end or modify your EarlyStopping callback to use any of the '
f'following: `{"`, `".join(list(logs.keys()))}`')
if monitor_val is None:
if self.strict:
raise RuntimeError(error_msg)
if self.verbose > 0:
rank_zero_warn(error_msg, RuntimeWarning)
return False
return True
@property
def monitor_op(self):
return self.mode_dict[self.mode]
def state_dict(self):
return {
'wait_count': self.wait_count,
'stopped_epoch': self.stopped_epoch,
'best_score': self.best_score,
'patience': self.patience
}
def load_state_dict(self, state_dict):
state_dict = deepcopy(state_dict)
self.wait_count = state_dict['wait_count']
self.stopped_epoch = state_dict['stopped_epoch']
self.best_score = state_dict['best_score']
self.patience = state_dict['patience']
def on_validation_end(self, trainer, pl_module):
self._run_early_stopping_check(trainer, pl_module)
def on_validation_epoch_end(self, trainer, pl_module):
val_es_key = 'val_early_stop_on'
if trainer.callback_metrics.get(val_es_key) is not None:
self.monitor = val_es_key
# disable strict checking when using structured results
if val_es_key in trainer.callback_metrics:
self.strict = False
self._validate_condition_metric(trainer.callback_metrics)
def on_train_epoch_end(self, trainer, pl_module):
# disable early stopping in train loop when there's a val loop
if self.monitor == 'val_early_stop_on':
return
# early stopping can also work in the train loop when there is no val loop and when using structured results
should_check_early_stop = False
train_es_key = 'early_stop_on'
if trainer.callback_metrics.get(train_es_key, None) is not None:
self.monitor = train_es_key
should_check_early_stop = True
if should_check_early_stop:
self._run_early_stopping_check(trainer, pl_module)
def _run_early_stopping_check(self, trainer, pl_module):
logs = trainer.callback_metrics
if not self._validate_condition_metric(logs):
return # short circuit if metric not present
current = logs.get(self.monitor)
# when in dev debugging
trainer.dev_debugger.track_early_stopping_history(current)
if not isinstance(current, torch.Tensor):
current = torch.tensor(current, device=pl_module.device)
if trainer.use_tpu and XLA_AVAILABLE:
current = current.cpu()
if self.monitor_op(current - self.min_delta, self.best_score):
self.best_score = current
self.wait_count = 0
else:
self.wait_count += 1
should_stop = self.wait_count >= self.patience
if bool(should_stop):
self.stopped_epoch = trainer.current_epoch
trainer.should_stop = True
# stop every ddp process if any world process decides to stop
self._stop_distributed_training(trainer, pl_module)
def _stop_distributed_training(self, trainer, pl_module):
# in ddp make sure all processes stop when one is flagged
if trainer.use_ddp or trainer.use_ddp2:
stop = torch.tensor(int(trainer.should_stop), device=pl_module.device)
dist.all_reduce(stop, op=dist.reduce_op.SUM)
dist.barrier()
trainer.should_stop = stop == trainer.world_size
if trainer.use_tpu:
stop = torch.tensor(int(trainer.should_stop), device=pl_module.device, dtype=torch.int32)
stop = xm.mesh_reduce("stop_signal", stop, torch.cat)
torch_xla.core.xla_model.rendezvous("pl.EarlyStoppingCallback.stop_distributed_training_check")
trainer.should_stop = int(stop.item()) == trainer.world_size
def on_train_end(self, trainer, pl_module):
if self.stopped_epoch > 0 and self.verbose > 0:
rank_zero_warn('Displayed epoch numbers by `EarlyStopping` start from "1" until v0.6.x,'
' but will start from "0" in v0.8.0.', DeprecationWarning)
log.info(f'Epoch {self.stopped_epoch + 1:05d}: early stopping triggered.')
| 37.442308 | 116 | 0.635336 | [
"Apache-2.0"
] | DavianYang/pytorch-lightning | pytorch_lightning/callbacks/early_stopping.py | 7,788 | Python |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google import auth # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v4.resources.types import ad_group
from google.ads.googleads.v4.services.types import ad_group_service
from .base import AdGroupServiceTransport, DEFAULT_CLIENT_INFO
class AdGroupServiceGrpcTransport(AdGroupServiceTransport):
"""gRPC backend transport for AdGroupService.
Service to manage ad groups.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]): The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
ssl_credentials=ssl_channel_credentials,
scopes=self.AUTH_SCOPES,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
# Run the base constructor.
super().__init__(
host=host, credentials=credentials, client_info=client_info,
)
@classmethod
def create_channel(
cls,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
scopes: Optional[Sequence[str]] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
            host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
scopes=scopes or cls.AUTH_SCOPES,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def get_ad_group(
self,
) -> Callable[[ad_group_service.GetAdGroupRequest], ad_group.AdGroup]:
r"""Return a callable for the get ad group method over gRPC.
Returns the requested ad group in full detail.
Returns:
Callable[[~.GetAdGroupRequest],
~.AdGroup]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_ad_group" not in self._stubs:
self._stubs["get_ad_group"] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v4.services.AdGroupService/GetAdGroup",
request_serializer=ad_group_service.GetAdGroupRequest.serialize,
response_deserializer=ad_group.AdGroup.deserialize,
)
return self._stubs["get_ad_group"]
@property
def mutate_ad_groups(
self,
) -> Callable[
[ad_group_service.MutateAdGroupsRequest],
ad_group_service.MutateAdGroupsResponse,
]:
r"""Return a callable for the mutate ad groups method over gRPC.
Creates, updates, or removes ad groups. Operation
statuses are returned.
Returns:
Callable[[~.MutateAdGroupsRequest],
~.MutateAdGroupsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "mutate_ad_groups" not in self._stubs:
self._stubs["mutate_ad_groups"] = self.grpc_channel.unary_unary(
"/google.ads.googleads.v4.services.AdGroupService/MutateAdGroups",
request_serializer=ad_group_service.MutateAdGroupsRequest.serialize,
response_deserializer=ad_group_service.MutateAdGroupsResponse.deserialize,
)
return self._stubs["mutate_ad_groups"]
__all__ = ("AdGroupServiceGrpcTransport",)
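# Construction sketch (illustrative; normally the AdGroupService client builds
# this transport for you). `credentials` is assumed to be a
# google.auth.credentials.Credentials instance supplied by the caller.
def _example_build_transport(credentials):
    transport = AdGroupServiceGrpcTransport(
        host="googleads.googleapis.com",
        credentials=credentials,
    )
    # get_ad_group is a property returning the callable gRPC stub.
    return transport.get_ad_group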
| 41.169118 | 90 | 0.621361 | [
"Apache-2.0"
] | batardo/google-ads-python | google/ads/googleads/v4/services/services/ad_group_service/transports/grpc.py | 11,198 | Python |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/72_callback.neptune.ipynb (unless otherwise specified).
__all__ = ['NeptuneCallback']
# Cell
import tempfile
from ..basics import *
from ..learner import Callback
# Cell
import neptune
# Cell
class NeptuneCallback(Callback):
"Log losses, metrics, model weights, model architecture summary to neptune"
order = Recorder.order+1
def __init__(self, log_model_weights=True, keep_experiment_running=False):
self.log_model_weights = log_model_weights
self.keep_experiment_running = keep_experiment_running
self.experiment = None
if neptune.project is None:
raise ValueError('You did not initialize project in neptune.\n',
'Please invoke `neptune.init("USERNAME/PROJECT_NAME")` before this callback.')
def before_fit(self):
try:
self.experiment = neptune.get_experiment()
except ValueError:
print('No active experiment. Please invoke `neptune.create_experiment()` before this callback.')
try:
self.experiment.set_property('n_epoch', str(self.learn.n_epoch))
self.experiment.set_property('model_class', str(type(self.learn.model)))
except: print(f'Did not log all properties. Check properties in the {neptune.get_experiment()}.')
try:
with tempfile.NamedTemporaryFile(mode='w') as f:
with open(f.name, 'w') as g: g.write(repr(self.learn.model))
self.experiment.log_artifact(f.name, 'model_summary.txt')
except: print('Did not log model summary. Check if your model is PyTorch model.')
if self.log_model_weights and not hasattr(self.learn, 'save_model'):
print('Unable to log model to Neptune.\n',
'Use "SaveModelCallback" to save model checkpoints that will be logged to Neptune.')
def after_batch(self):
# log loss and opt.hypers
if self.learn.training:
self.experiment.log_metric('batch__smooth_loss', self.learn.smooth_loss)
self.experiment.log_metric('batch__loss', self.learn.loss)
self.experiment.log_metric('batch__train_iter', self.learn.train_iter)
for i, h in enumerate(self.learn.opt.hypers):
for k, v in h.items(): self.experiment.log_metric(f'batch__opt.hypers.{k}', v)
def after_epoch(self):
# log metrics
for n, v in zip(self.learn.recorder.metric_names, self.learn.recorder.log):
if n not in ['epoch', 'time']: self.experiment.log_metric(f'epoch__{n}', v)
if n == 'time': self.experiment.log_text(f'epoch__{n}', str(v))
# log model weights
if self.log_model_weights and hasattr(self.learn, 'save_model'):
if self.learn.save_model.every_epoch:
_file = join_path_file(f'{self.learn.save_model.fname}_{self.learn.save_model.epoch}',
self.learn.path / self.learn.model_dir, ext='.pth')
else:
_file = join_path_file(self.learn.save_model.fname,
self.learn.path / self.learn.model_dir, ext='.pth')
self.experiment.log_artifact(_file)
def after_fit(self):
if not self.keep_experiment_running:
try: self.experiment.stop()
except: print('No neptune experiment to stop.')
else:
print(f'Your experiment (id: {self.experiment.id}, name: {self.experiment.name}) is left in the running state.\n',
'You can log more data to it, like this: `neptune.log_metric()`') | 46.75641 | 126 | 0.639155 | [
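# Cell
# Usage sketch (illustrative; the project name and learner are placeholders).
# The callback expects an initialized project and a running experiment, as the
# checks above enforce.
def _neptune_usage_example(learn):
    neptune.init('USERNAME/PROJECT_NAME')
    neptune.create_experiment(name='fastai-run')
    learn.add_cb(NeptuneCallback(log_model_weights=True))
    learn.fit_one_cycle(1)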
"Apache-2.0"
] | Aky87/fastai | fastai/callback/neptune.py | 3,647 | Python |
'''
Module:
Set regular or irregular axis ticks for a plot.
'''
from module_utility import *
import numpy as np
import matplotlib.pyplot as plt
# ticks : contains irregular ticks locations
# tickbeg : regular major ticks begin location
# tickend : regular major ticks end location
# tickd : regular major ticks interval
# mtick : number of minor tick intervals betwen two major ticks
# xbeg : axis begin location
# xend : axis end location
# ns : number of points to plot
# d : interval between two points
# axislen : apparent axis length
def define_tick(ticks, tickbeg, tickend, tickd, mtick, xbeg, xend, ns, d, axislen, format, extend=False):
# regular ticks
if ticks is None:
# major tick interval
if tickd is None:
tick_interval = nice((xend - xbeg) / 5.0)
if tick_interval == 0:
tick_interval = 1.0e10
else:
tick_interval = float(tickd)
# tick begin location
if tickbeg is None:
tick_beg = nice(xbeg)
base = 0.5
nb = 0
if tick_interval > 0:
while nb <= 10 and tick_beg > xbeg + tick_interval:
base = base / 10.0
tick_beg = nice(xbeg, base)
nb = nb + 1
else:
while nb <= 10 and tick_beg < xbeg + tick_interval:
base = base / 10.0
tick_beg = nice(xbeg, base)
nb = nb + 1
else:
tick_beg = float(tickbeg)
# tick end location
if tickend is None:
tick_end = tick_beg + (round((xend - xbeg) / tick_interval) + 2) * tick_interval
if tick_interval > 0:
while tick_end < xend:
tick_end = tick_end + abs(tick_interval)
else:
while tick_end > xend:
tick_end = tick_end - abs(tick_interval)
else:
tick_end = float(tickend)
# regular major and minor tick locations
tick = np.arange(tick_beg, tick_end + 0.1 * abs(tick_interval), tick_interval)
minor_tick_interval = tick_interval / (mtick + 1.0)
minor_tick = np.arange(tick_beg, tick_end + 0.1 * abs(minor_tick_interval), minor_tick_interval)
# some ticks might out of axis range, therefore remove them if strict
if not extend:
if d > 0:
tick = np.asarray([i for i in tick if i >= xbeg and i <= xend])
minor_tick = np.asarray(
[i for i in minor_tick if i >= xbeg and i <= xend and (not i in tick)])
if d < 0:
tick = np.asarray([i for i in tick if i <= xbeg and i >= xend])
minor_tick = np.asarray(
[i for i in minor_tick if i <= xbeg and i >= xend and (not i in tick)])
# linearly scale the ticks to figure canvas
        if ns == 1:
            # if only one sample point, then tick location is 0.5 and there
            # are no minor ticks to place
            tick_location = np.asarray([0.5])
            minor_tick_location = []
            ntick = 1
else:
# if multiple sample points, then scale to apparent axis length
tick_location = [(i - xbeg + 0.5 * d) / ((ns - 1) * d) * axislen for i in tick]
minor_tick_location = [(i - xbeg + 0.5 * d) / ((ns - 1) * d) * axislen for i in minor_tick]
t = tick_location
# set major tick location and labels, note some major ticks might be out of axis range
tl = []
tick_label = []
for i in range(0, len(tick)):
if extend or ((not extend) and tick_location[i] >= 0 and tick_location[i] <= axislen + 1.0e-10):
tl.append(tick_location[i])
if format == 'sci' or format == 'plain':
tick_label.append(('%f' % tick[i]).rstrip('0').rstrip('.'))
else:
tick_label.append((format % tick[i]))
tick_location = tl
# irregular ticks
else:
# get contents from user-specified ticks
ticks = ticks[0].split(',')
location = [0 for i in range(0, len(ticks))]
label = ['' for i in range(0, len(ticks))]
# set tick locations
for i in range(0, len(ticks)):
t = ticks[i].split(':')
location[i] = (float(t[0]) + 0.5 * d) / ((ns - 1) * d) * axislen
label[i] = t[1]
# sort according to tick location
yx = list(zip(location, label))
yx.sort()
tick_location = [location for location, label in yx]
tick_label = [label for location, label in yx]
# minor ticks
if mtick != 0:
mtick = mtick + 1
minor_tick_location = np.linspace(tick_location[0], tick_location[1], mtick + 1)
minor_tick_location = minor_tick_location[1:mtick]
for i in range(1, len(tick_location) - 1):
t = np.linspace(tick_location[i], tick_location[i + 1], mtick + 1)
minor_tick_location = np.append(minor_tick_location, t[1:mtick])
else:
minor_tick_location = []
# return major tick location, major tick label and minor tick location
return tick_location, tick_label, minor_tick_location
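# Worked example for define_tick (illustrative numbers; assumes nice() from
# module_utility behaves as it does in the regular-tick branch above): major
# ticks every 0.5 on a 10-sample axis of spacing 0.1 drawn over an apparent
# length of 6, with one minor tick between majors.
def _define_tick_example():
    return define_tick(
        ticks=None, tickbeg=None, tickend=None, tickd=0.5, mtick=1,
        xbeg=1.0, xend=1.9, ns=10, d=0.1, axislen=6.0, format='plain')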
def set_tick(args,
font,
x1beg,
x1end,
n1beg,
n1end,
d1,
axis1len,
x2beg,
x2end,
n2beg,
n2end,
d2,
axis2len,
extend=False):
ax = plt.gca()
label_1_size = float(args.label1size)
label_2_size = float(args.label2size)
xlabel = ax.set_xlabel(args.label2, fontsize=label_2_size, labelpad=float(args.label2pad)*72*2)
ylabel = ax.set_ylabel(args.label1, fontsize=label_1_size, labelpad=float(args.label1pad)*72*2)
l = ax.yaxis.get_label()
l.set_fontproperties(font)
l.set_fontsize(label_1_size)
l = ax.xaxis.get_label()
l.set_fontproperties(font)
l.set_fontsize(label_2_size)
if args.label2loc is not None:
ax.xaxis.set_label_position(args.label2loc)
else:
if args.ticktop:
ax.xaxis.set_label_position('top')
else:
ax.xaxis.set_label_position('bottom')
if args.label1loc is not None:
ax.yaxis.set_label_position(args.label1loc)
else:
if args.tickleft:
ax.yaxis.set_label_position('left')
else:
ax.yaxis.set_label_position('right')
ylabel.set_rotation(270)
# ticks on/off
ax.get_yaxis().set_tick_params(which='both', direction='out')
ax.get_xaxis().set_tick_params(which='both', direction='out')
plt.tick_params(
axis='x', # changes apply to the x1-axis
which='both', # both major and minor ticks are affected
bottom=args.tickbottom, # ticks along the bottom axis
top=args.ticktop, # ticks along the top axis
labelbottom=args.tickbottom, # labels along the bottom axis
labeltop=args.ticktop) # labels along the top axis
plt.tick_params(
axis='y', # changes apply to the x2-axis
which='both', # both major and minor ticks are affected
left=args.tickleft, # ticks along the left axis
right=args.tickright, # ticks along the right axis
labelleft=args.tickleft, # labels along the left axis
labelright=args.tickright) # labels along the right axis
# if tick font size and family not speciefied, then inherit from axis labels
if args.tick1size is None:
tick_1_font_size = label_1_size - 2
else:
tick_1_font_size = float(args.tick1size)
if args.tick2size is None:
tick_2_font_size = label_2_size - 2
else:
tick_2_font_size = float(args.tick2size)
# axis 1
tick_1_location, tick_1_label, tick_1_minor = define_tick(args.ticks1, args.tick1beg, args.tick1end,
args.tick1d, args.mtick1, x1beg, x1end,
n1end - n1beg + 1, d1, axis1len,
args.tick1format, extend)
plt.yticks(tick_1_location, tick_1_label, fontsize=tick_1_font_size, rotation=float(args.tick1rot))
if not args.tick1label:
ax.yaxis.set_ticklabels([])
# axis 2
tick_2_location, tick_2_label, tick_2_minor = define_tick(args.ticks2, args.tick2beg, args.tick2end,
args.tick2d, args.mtick2, x2beg, x2end,
n2end - n2beg + 1, d2, axis2len,
args.tick2format, extend)
plt.xticks(tick_2_location, tick_2_label, fontsize=tick_2_font_size, rotation=float(args.tick2rot))
if not args.tick2label:
ax.xaxis.set_ticklabels([])
# major and minor ticks sytle
ax.tick_params('both', length=float(args.tickmajorlen), width=float(args.tickmajorwid), which='major')
# minor tick positions
ax.set_yticks(tick_1_minor, minor=True)
ax.set_xticks(tick_2_minor, minor=True)
# minor ticks style
if args.tickminorlen is None:
tick_minor_length = 0.5 * float(args.tickmajorlen)
else:
tick_minor_length = float(args.tickminorlen)
if args.tickminorwid is None:
tick_minor_width = 0.75 * float(args.tickmajorwid)
else:
tick_minor_width = float(args.tickminorwid)
ax.tick_params('both', length=tick_minor_length, width=tick_minor_width, which='minor')
for l in ax.yaxis.get_ticklabels():
l.set_fontproperties(font)
l.set_fontsize(tick_1_font_size)
for l in ax.xaxis.get_ticklabels():
l.set_fontproperties(font)
l.set_fontsize(tick_2_font_size)
# make tick labels rigid
def rigid_tick_label(tick_label):
ndec = 0
for i in tick_label:
dec = i.split('.')
if len(dec) == 2:
ll = len(dec[1])
if ll > ndec:
ndec = ll
for i in range(0, len(tick_label)):
dec = tick_label[i].split('.')
if len(dec) == 2:
ll = len(dec[1])
if ll < ndec:
for k in range(0, ndec - ll):
tick_label[i] = tick_label[i] + '0'
if len(dec) == 1 and ndec != 0:
tick_label[i] = tick_label[i] + '.'
for k in range(0, ndec):
tick_label[i] = tick_label[i] + '0'
return tick_label
| 37.475177 | 108 | 0.568793 | [
"BSD-3-Clause"
] | lanl/pymplot | src/module_tick.py | 10,568 | Python |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License 2.0;
# This module is used for version 2 of the Google Data APIs.
"""Provides classes and constants for the XML in the Google Spreadsheets API.
Documentation for the raw XML which these classes represent can be found here:
http://code.google.com/apis/spreadsheets/docs/3.0/reference.html#Elements
"""
# __author__ = 'j.s@google.com (Jeff Scudder)'
import atom.core
import gdata.data
GS_TEMPLATE = '{http://schemas.google.com/spreadsheets/2006}%s'
GSX_NAMESPACE = 'http://schemas.google.com/spreadsheets/2006/extended'
INSERT_MODE = 'insert'
OVERWRITE_MODE = 'overwrite'
WORKSHEETS_REL = 'http://schemas.google.com/spreadsheets/2006#worksheetsfeed'
BATCH_POST_ID_TEMPLATE = ('https://spreadsheets.google.com/feeds/cells'
'/%s/%s/private/full')
BATCH_ENTRY_ID_TEMPLATE = '%s/R%sC%s'
BATCH_EDIT_LINK_TEMPLATE = '%s/batch'
class Error(Exception):
pass
class FieldMissing(Exception):
pass
class HeaderNotSet(Error):
"""The desired column header had no value for the row in the list feed."""
class Cell(atom.core.XmlElement):
"""The gs:cell element.
A cell in the worksheet. The <gs:cell> element can appear only as a child
of <atom:entry>.
"""
_qname = GS_TEMPLATE % 'cell'
col = 'col'
input_value = 'inputValue'
numeric_value = 'numericValue'
row = 'row'
class ColCount(atom.core.XmlElement):
"""The gs:colCount element.
Indicates the number of columns in the worksheet, including columns that
contain only empty cells. The <gs:colCount> element can appear as a child
of <atom:entry> or <atom:feed>
"""
_qname = GS_TEMPLATE % 'colCount'
class Field(atom.core.XmlElement):
"""The gs:field element.
    A field: a single cell within a record. Contained in an <atom:entry>.
"""
_qname = GS_TEMPLATE % 'field'
index = 'index'
name = 'name'
class Column(Field):
"""The gs:column element."""
_qname = GS_TEMPLATE % 'column'
class Data(atom.core.XmlElement):
"""The gs:data element.
A data region of a table. Contained in an <atom:entry> element.
"""
_qname = GS_TEMPLATE % 'data'
column = [Column]
insertion_mode = 'insertionMode'
num_rows = 'numRows'
start_row = 'startRow'
class Header(atom.core.XmlElement):
"""The gs:header element.
Indicates which row is the header row. Contained in an <atom:entry>.
"""
_qname = GS_TEMPLATE % 'header'
row = 'row'
class RowCount(atom.core.XmlElement):
"""The gs:rowCount element.
Indicates the number of total rows in the worksheet, including rows that
contain only empty cells. The <gs:rowCount> element can appear as a
child of <atom:entry> or <atom:feed>.
"""
_qname = GS_TEMPLATE % 'rowCount'
class Worksheet(atom.core.XmlElement):
"""The gs:worksheet element.
    The worksheet where the table lives. Contained in an <atom:entry>.
"""
_qname = GS_TEMPLATE % 'worksheet'
name = 'name'
class Spreadsheet(gdata.data.GDEntry):
"""An Atom entry which represents a Google Spreadsheet."""
def find_worksheets_feed(self):
return self.find_url(WORKSHEETS_REL)
FindWorksheetsFeed = find_worksheets_feed
def get_spreadsheet_key(self):
"""Extracts the spreadsheet key unique to this spreadsheet."""
return self.get_id().split('/')[-1]
GetSpreadsheetKey = get_spreadsheet_key
class SpreadsheetsFeed(gdata.data.GDFeed):
"""An Atom feed listing a user's Google Spreadsheets."""
entry = [Spreadsheet]
class WorksheetEntry(gdata.data.GDEntry):
"""An Atom entry representing a single worksheet in a spreadsheet."""
row_count = RowCount
col_count = ColCount
def get_worksheet_id(self):
"""The worksheet ID identifies this worksheet in its spreadsheet."""
return self.get_id().split('/')[-1]
GetWorksheetId = get_worksheet_id
class WorksheetsFeed(gdata.data.GDFeed):
"""A feed containing the worksheets in a single spreadsheet."""
entry = [WorksheetEntry]
class Table(gdata.data.GDEntry):
"""An Atom entry that represents a subsection of a worksheet.
    A table allows you to treat part or all of a worksheet somewhat like a
    table in a database, that is, as a set of structured data items. Tables
    don't exist until you explicitly create them; before you can use a table
    feed, you have to explicitly define where the table data comes from.
"""
data = Data
header = Header
worksheet = Worksheet
def get_table_id(self):
if self.id.text:
return self.id.text.split('/')[-1]
return None
GetTableId = get_table_id
class TablesFeed(gdata.data.GDFeed):
"""An Atom feed containing the tables defined within a worksheet."""
entry = [Table]
class Record(gdata.data.GDEntry):
"""An Atom entry representing a single record in a table.
Note that the order of items in each record is the same as the order of
columns in the table definition, which may not match the order of
columns in the GUI.
"""
field = [Field]
def value_for_index(self, column_index):
for field in self.field:
if field.index == column_index:
return field.text
raise FieldMissing('There is no field for %s' % column_index)
ValueForIndex = value_for_index
def value_for_name(self, name):
for field in self.field:
if field.name == name:
return field.text
raise FieldMissing('There is no field for %s' % name)
ValueForName = value_for_name
def get_record_id(self):
if self.id.text:
return self.id.text.split('/')[-1]
return None
class RecordsFeed(gdata.data.GDFeed):
"""An Atom feed containing the individuals records in a table."""
entry = [Record]
class ListRow(atom.core.XmlElement):
"""A gsx column value within a row.
The local tag in the _qname is blank and must be set to the column
name. For example, when adding to a ListEntry, do:
col_value = ListRow(text='something')
col_value._qname = col_value._qname % 'mycolumnname'
"""
_qname = '{http://schemas.google.com/spreadsheets/2006/extended}%s'
class ListEntry(gdata.data.GDEntry):
"""An Atom entry representing a worksheet row in the list feed.
    The values for a particular column can be read and set using
x.get_value('columnheader') and x.set_value('columnheader', 'value').
See also the explanation of column names in the ListFeed class.
"""
def get_value(self, column_name):
"""Returns the displayed text for the desired column in this row.
The formula or input which generated the displayed value is not accessible
        through the list feed; to see the user's input, use the cells feed.
If a column is not present in this spreadsheet, or there is no value
for a column in this row, this method will return None.
"""
values = self.get_elements(column_name, GSX_NAMESPACE)
if len(values) == 0:
return None
return values[0].text
def set_value(self, column_name, value):
"""Changes the value of cell in this row under the desired column name.
Warning: if the cell contained a formula, it will be wiped out by setting
the value using the list feed since the list feed only works with
displayed values.
        No client side checking is performed on the column_name; you need to
ensure that the column_name is the local tag name in the gsx tag for the
column. For example, the column_name will not contain special characters,
spaces, uppercase letters, etc.
"""
# Try to find the column in this row to change an existing value.
values = self.get_elements(column_name, GSX_NAMESPACE)
if len(values) > 0:
values[0].text = value
else:
# There is no value in this row for the desired column, so add a new
# gsx:column_name element.
new_value = ListRow(text=value)
new_value._qname = new_value._qname % (column_name,)
self._other_elements.append(new_value)
def to_dict(self):
"""Converts this row to a mapping of column names to their values."""
result = {}
values = self.get_elements(namespace=GSX_NAMESPACE)
for item in values:
result[item._get_tag()] = item.text
return result
def from_dict(self, values):
"""Sets values for this row from the dictionary.
Old values which are already in the entry will not be removed unless
they are overwritten with new values from the dict.
"""
for column, value in values.items():
self.set_value(column, value)
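# Illustrative ListEntry usage (a sketch; the column names below are assumptions
# and must match the header row of the actual worksheet):
#   entry = ListEntry()
#   entry.set_value('name', 'Ada')
#   entry.set_value('email', 'ada@example.com')
#   entry.get_value('name')   # -> 'Ada'
#   entry.to_dict()           # -> {'name': 'Ada', 'email': 'ada@example.com'}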
class ListsFeed(gdata.data.GDFeed):
"""An Atom feed in which each entry represents a row in a worksheet.
The first row in the worksheet is used as the column names for the values
in each row. If a header cell is empty, then a unique column ID is used
for the gsx element name.
Spaces in a column name are removed from the name of the corresponding
gsx element.
Caution: The columnNames are case-insensitive. For example, if you see
a <gsx:e-mail> element in a feed, you can't know whether the column
heading in the original worksheet was "e-mail" or "E-Mail".
Note: If two or more columns have the same name, then subsequent columns
of the same name have _n appended to the columnName. For example, if the
first column name is "e-mail", followed by columns named "E-Mail" and
"E-mail", then the columnNames will be gsx:e-mail, gsx:e-mail_2, and
gsx:e-mail_3 respectively.
"""
entry = [ListEntry]
class CellEntry(gdata.data.BatchEntry):
"""An Atom entry representing a single cell in a worksheet."""
cell = Cell
class CellsFeed(gdata.data.BatchFeed):
"""An Atom feed contains one entry per cell in a worksheet.
    The cell feed supports batch operations; you can send multiple cell
operations in one HTTP request.
"""
entry = [CellEntry]
def add_set_cell(self, row, col, input_value):
"""Adds a request to change the contents of a cell to this batch request.
Args:
row: int, The row number for this cell. Numbering starts at 1.
col: int, The column number for this cell. Starts at 1.
input_value: str, The desired formula/content this cell should contain.
"""
self.add_update(CellEntry(
id=atom.data.Id(text=BATCH_ENTRY_ID_TEMPLATE % (
self.id.text, row, col)),
cell=Cell(col=str(col), row=str(row), input_value=input_value)))
return self
AddSetCell = add_set_cell
def build_batch_cells_update(spreadsheet_key, worksheet_id):
"""Creates an empty cells feed for adding batch cell updates to.
    Call add_set_cell on the resulting CellsFeed instance, then send the batch
    request. TODO: fill in
    Args:
      spreadsheet_key: The ID of the spreadsheet
      worksheet_id: The ID of the worksheet within the spreadsheet
"""
feed_id_text = BATCH_POST_ID_TEMPLATE % (spreadsheet_key, worksheet_id)
return CellsFeed(
id=atom.data.Id(text=feed_id_text),
link=[atom.data.Link(
rel='edit', href=BATCH_EDIT_LINK_TEMPLATE % (feed_id_text,))])
BuildBatchCellsUpdate = build_batch_cells_update
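# Illustrative batch-update sketch (the spreadsheet key and worksheet id below
# are placeholders, not real identifiers):
#   batch = build_batch_cells_update('my_spreadsheet_key', 'od6')
#   batch.add_set_cell(row=1, col=1, input_value='Name')
#   batch.add_set_cell(row=1, col=2, input_value='=SUM(B2:B5)')
#   # The populated feed is then posted to its batch edit link by a client.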
| 31.162162 | 82 | 0.674761 | [
"Apache-2.0"
] | BinaryMuse/gdata-python3 | src/gdata/spreadsheets/data.py | 11,530 | Python |
import numpy as np
import pandas as pd
from openpyxl import load_workbook
import sys
def print_array_to_excel(array, first_cell, ws, axis=2):
'''
Print an np array to excel using openpyxl
:param array: np array
:param first_cell: first cell to start dumping values in
:param ws: worksheet reference. From openpyxl, ws=wb[sheetname]
:param axis: to determine if the array is a col vector (0), row vector (1), or 2d matrix (2)
'''
if isinstance(array, (list,)):
array = np.array(array)
shape = array.shape
    if axis == 0:
        # Treat array as col vector and print along the rows
        array = array.flatten()  # Flatten in case the input array is a nx1 ndarray which acts weird
        for i in range(array.shape[0]):
            j = 0
            ws.cell(i + first_cell[0], j + first_cell[1]).value = array[i]
    elif axis == 1:
        # Treat array as row vector and print along the columns
        array = array.flatten()  # Flatten in case the input array is a 1xn ndarray which acts weird
        for j in range(array.shape[0]):
            i = 0
            ws.cell(i + first_cell[0], j + first_cell[1]).value = array[j]
elif axis == 2:
# If axis==2, means it is a 2d array
for i in range(shape[0]):
for j in range(shape[1]):
ws.cell(i + first_cell[0], j + first_cell[1]).value = array[i, j]
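# Minimal usage sketch (the workbook path and sheet name below are placeholders):
#   wb = load_workbook('book.xlsx')
#   ws = wb['Sheet1']
#   print_array_to_excel(np.arange(6).reshape(2, 3), (1, 1), ws, axis=2)
#   wb.save('book.xlsx')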
if __name__ == '__main__':
print('hi') | 38.189189 | 96 | 0.610757 | [
"MIT"
] | acceleratedmaterials/NUS_AMDworkshop | gold nanocluster synthesis/own_package/others.py | 1,413 | Python |
# -*- coding: utf-8 -*-
# URL : https://leetcode-cn.com/problems/median-of-two-sorted-arrays/
""""""
"""
problem:
给定两个大小为 m 和 n 的有序数组 nums1 和 nums2。
请你找出这两个有序数组的中位数,并且要求算法的时间复杂度为 O(log(m + n))。
你可以假设 nums1 和 nums2 不会同时为空。
示例 1:
nums1 = [1, 3]
nums2 = [2]
则中位数是 2.0
示例 2:
nums1 = [1, 2]
nums2 = [3, 4]
则中位数是 (2 + 3)/2 = 2.5
"""
"""
explain:
看清楚,复杂度是 O(log(m + n)),而不是 O(m + n),所以不能合并这两个数组,要原封不动,用下标去访问找出中位数。
中位数就是排序数组序列的中间位置的元素,奇数个元素取一个中间元素,偶数个元素取中间两个元素求平均。
要寻找的两个元素(非下标):(m + n + 1) / 2,(m + n + 2) / 2,当元素个数为奇数个时,这两个值是相等的,因此可以寻找这两个位置的元素出来求平均。
题目转变成找出第 k 个的元素,这里的 k 就是上面那两个。
这两个数组,是各自有序,要找这两个的元素,就需要进行比较淘汰。
找第 k 个元素的过程:
取出各自下标为 k / 2 - 1 的元素,也就是中间元素,这里就可以使得复杂度为 log 级别。
如果 nums1 < nums2,就表明 nums1 前面 k / 2 不可能有合并之后的 k,可以淘汰 nums1 的前 k / 2 个元素;
如果 nums1 > nums2,也表明 nums2 前面 k / 2 可以淘汰。
淘汰之后,k 变为 k - k / 2。
另外,k == 1 时,就不存在 k / 2(中间元素),此时比较 nums1、nums2 当前索引值的大小,取小的那一个,因为这里是取第 1(k) 个元素。
当索引值超出对应的 nums 长度时,表明 k 在另一个数组中,可以返回下标为 (索引值 + k - 1) 的元素,其中(k - 1)就是取下标。
演示:
nums1 = [1, 2, 3]
nums2 = [4, 5, 6]
根据 (m + n + 1) / 2,(m + n + 2) / 2,需要找出第 3,4 这两个元素,求平均值
初始索引值:index1 = index2 = 0
找 k == 3 的过程:
1. 根据 k / 2 - 1,各自取出下标为 0 的元素,分别是 1 和 4;由于 1 < 4,所以淘汰 nums1 中的前 k / 2 个元素,即 index1(索引值)为 1。
2. 根据 k - k / 2,k 变更为 2。
3. 变成寻找 k == 2 的过程,重复 1、2 步骤。
4. 各自取出下标为 0 的元素(叠加索引值),分别是 2 和 4;由于 2 < 4,所以 nums1 只剩下 3 这个元素,即 index1 == 2。
5. k 变更为 1。
6. 比较 nums1、nums2 当前索引值的大小,取小的那一个,即 3 和 4,取元素 3。
找 k == 4 的过程:
1. 根据 k / 2 - 1,各自取出下标为 1 的元素,分别是 2 和 5;由于 2 < 5,所以淘汰 nums1 中的前 k / 2 个元素,即 index1(索引值)为 2。
2. 根据 k - k / 2,k 变更为 2。
3. 变成寻找 k == 2 的过程,重复 1、2 步骤。
4. 各自取出下标为 0 的元素(叠加索引值),分别是 3 和 4;由于 3 < 4,所以 index1 == 3。
5. k 变更为 1。
6. 判断 index1 >= nums1.length,即 nums1 全部淘汰,取 nums2 中下标为 (index2 + k - 1)的元素,即元素 4。
平均值(中位数):
(3 + 4) / 2 = 3.5
"""
"""
out:
执行用时 : 88 ms, 在所有 python 提交中击败了 63.81% 的用户
内存消耗 : 11.8 MB, 在所有 python 提交中击败了 32.58% 的用户
"""
class Solution(object):
def findMedianSortedArrays(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: float
"""
m = len(nums1)
n = len(nums2)
def find_kth(nums1, nums2, index1, index2, k):
            # range check: if one array is exhausted, the answer lies in the other
if index1 >= len(nums1):
return nums2[index2 + k - 1]
if index2 >= len(nums2):
return nums1[index1 + k - 1]
# k == 1
if k == 1:
return nums1[index1] if nums1[index1] < nums2[index2] else nums2[index2]
            # compare the middle elements and discard half of one array
do_discard_nums1 = True
mid = k // 2 - 1
if index1 + mid >= len(nums1) or (
index2 + mid < len(nums2) and nums1[index1 + mid] > nums2[index2 + mid]
):
do_discard_nums1 = False
mid += 1
if do_discard_nums1:
                # discard the elements of nums1 before mid
return find_kth(nums1, nums2, index1 + mid, index2, k - mid)
else:
return find_kth(nums1, nums2, index1, index2 + mid, k - mid)
return (
find_kth(nums1, nums2, 0, 0, (m + n + 1) // 2)
+ find_kth(nums1, nums2, 0, 0, (m + n + 2) // 2)
) / 2.0
if __name__ == "__main__":
solution = Solution()
assert solution.findMedianSortedArrays([1, 3], [2]) == 2.0
assert solution.findMedianSortedArrays([2], [1, 3]) == 2.0
assert solution.findMedianSortedArrays([1, 2], [3, 4]) == 2.5
assert solution.findMedianSortedArrays([1, 3], [2, 4]) == 2.5
assert solution.findMedianSortedArrays([], [1]) == 1.0
assert solution.findMedianSortedArrays([1], []) == 1.0
assert solution.findMedianSortedArrays([1, 3], []) == 2.0
assert solution.findMedianSortedArrays([], [1, 3]) == 2.0
assert solution.findMedianSortedArrays([1, 2, 3], []) == 2.0
assert solution.findMedianSortedArrays([], [1, 2, 3]) == 2.0
assert solution.findMedianSortedArrays([1, 2, 3, 5], [4, 6, 7, 8, 9]) == 5.0
assert solution.findMedianSortedArrays([1], [2, 3, 4, 5, 6]) == 3.5
| 30.450382 | 91 | 0.57107 | [
"Apache-2.0"
] | Buddy119/algorithm | Codes/xiaohong2019/leetcode/4_median_of_two_sorted_arrays.py | 5,630 | Python |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-27 14:08
from __future__ import unicode_literals
from django.db import migrations
import wagtail.core.fields
class Migration(migrations.Migration):
dependencies = [
('home', '0011_auto_20170727_1324'),
]
operations = [
migrations.AddField(
model_name='homepage',
name='partner_subtitle',
field=wagtail.core.fields.RichTextField(blank=True),
),
migrations.AddField(
model_name='homepage',
name='partner_title',
field=wagtail.core.fields.RichTextField(blank=True),
),
]
| 24.62963 | 64 | 0.618045 | [
"BSD-3-Clause"
] | evonove/evonove | django-website/home/migrations/0012_auto_20170727_1408.py | 665 | Python |
from __future__ import annotations
import re
from abc import abstractmethod, ABC
from enum import Enum
from typing import List, Optional, Literal, Tuple, Union
from ulauncher.api.client.Extension import Extension
from ulauncher.api.client.EventListener import EventListener
import ulauncher.api.shared.event as events
from ulauncher.api.shared.item.ExtensionResultItem import ExtensionResultItem
from ulauncher.api.shared.action.RenderResultListAction import RenderResultListAction
from ulauncher.api.shared.action.DoNothingAction import DoNothingAction
from ulauncher.api.shared.action.CopyToClipboardAction import CopyToClipboardAction
class DemoExtension(Extension):
def __init__(self):
super().__init__()
self.subscribe(events.KeywordQueryEvent, KeywordQueryEventListener())
class Number(ABC):
@classmethod
def parse(cls, payload: str, encoding: Encoding) -> Union[Number, ExtensionResultItem]:
if len(payload) == 0:
return ExtensionResultItem(
icon='images/icon.png',
name='No input',
description=f"Please input a {encoding} number",
on_enter=DoNothingAction(),
)
try:
value = encoding.decode(payload)
return Number(value)
except ValueError:
msg = "Failed to convert number"
description = f"Value {payload} is not a {encoding} number."
return ExtensionResultItem(
icon='images/icon.png',
name=msg,
description=description,
on_enter=DoNothingAction(),
on_alt_enter=DoNothingAction(),
)
def __init__(self, value: int):
self.value = value
def result_item(self, encoding: Encoding) -> ExtensionResultItem:
payload = encoding.encode(self.value)
return ExtensionResultItem(
icon=encoding.icon,
name=payload,
description=encoding.__str__().capitalize() + '; Copy to clipboard.',
on_enter=CopyToClipboardAction(payload),
on_alt_enter=CopyToClipboardAction(payload),
)
class Encoding(ABC):
@abstractmethod
def base(self) -> int:
pass
@property
def icon(self) -> str:
return 'images/icon.png'
@abstractmethod
def __str__(self):
pass
@abstractmethod
def encode(self, value: int) -> str:
pass
def decode(self, value: str) -> int:
return int(value, self.base())
class Hexadecimal(Encoding):
def base(self) -> int:
return 16
@property
def icon(self) -> str:
return 'images/hex.png'
def __str__(self):
return "hexadecimal"
def encode(self, value: int) -> str:
return hex(value)[2:]
class Decimal(Encoding):
def base(self) -> int:
return 10
@property
def icon(self) -> str:
return 'images/dec.png'
def __str__(self):
return "decimal"
def encode(self, value: int) -> str:
return str(value)
class Binary(Encoding):
def base(self) -> int:
return 2
@property
def icon(self) -> str:
return 'images/bin.png'
def __str__(self):
return "binary"
def encode(self, value: int) -> str:
return bin(value)[2:]
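# Quick illustration of the encoders (values chosen arbitrarily):
#   Hexadecimal().encode(255) -> 'ff'     Hexadecimal().decode('ff') -> 255
#   Binary().encode(5)        -> '101'    Decimal().decode('42')     -> 42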
class KeywordQueryEventListener(EventListener):
def on_event(self, event: events.KeywordQueryEvent, extension: Extension):
arg = event.get_argument() or ""
value = re.split(r"\s+", arg)[0]
kw = event.get_keyword()
if kw == extension.preferences["kw_hex"]:
num = Number.parse(value, Hexadecimal())
encodings = [Decimal(), Binary()]
elif kw == extension.preferences["kw_bin"]:
num = Number.parse(value, Binary())
encodings = [Decimal(), Hexadecimal()]
elif kw == extension.preferences["kw_dec"]:
num = Number.parse(value, Decimal())
encodings = [Hexadecimal(), Binary()]
else:
raise RuntimeError()
if isinstance(num, ExtensionResultItem):
items = [num]
else:
items = list(map(lambda enc: num.result_item(enc), encodings))
return RenderResultListAction(items)
if __name__ == '__main__':
DemoExtension().run()
| 28.188312 | 91 | 0.619443 | [
"Apache-2.0"
] | Troublor/ulauncher-numconverter | main.py | 4,341 | Python |
from diogi.functions import *
from diogi.conventions import to_data
from .docs import WithDocsMixin
def noop_resolver(href: str) -> dict:
pass
class Descriptor:
@staticmethod
def parse(obj: any, resolver: callable):
if dict == type(obj):
href = get_if_exists(obj, "href", None)
resolved = obj
if href:
resolved = {**default_if_none(resolver(href), {}), **obj}
desc_type = get_if_exists(resolved, "type", "semantic")
docs = get_if_exists(resolved, "doc", None)
else:
return None
# desc = None
id = get_if_exists(resolved, "id")
name = get_if_exists(resolved, "name")
if desc_type == "semantic":
desc = Semantic(id=id, name=name)
elif desc_type == "safe":
desc = Safe(id=id, name=name)
elif desc_type == "unsafe":
desc = Unsafe(id=id, name=name)
elif desc_type == "idempotent":
desc = Idempotent(id=id, name=name)
if docs:
add_doc = getattr(desc, "add_doc", None)
if add_doc:
add_doc(docs)
for d in always_a_list(get_if_exists(resolved, "descriptor", [])):
desc.add_descriptor(d, resolver)
return desc
class DescriptorBase(WithDocsMixin):
def __init__(self):
super(DescriptorBase, self).__init__()
self.contents = {}
@property
def id(self):
return get_if_exists(self.contents, "id", None)
@property
def name(self):
return get_if_exists(self.contents, "name", None)
@property
def descriptors(self):
return always_a_list(get_if_exists(self.contents, "descriptor", []))
def add_descriptor(
self, descriptor: Descriptor, resolver: callable = noop_resolver
):
if not isinstance(descriptor, Descriptor):
descriptor = Descriptor.parse(descriptor, resolver)
append_if_not_none(self.contents, descriptor, "descriptor")
return self
def get_descriptor(self, id: str) -> Descriptor:
return list_is_optional(
[d for d in get_if_exists(self.contents, "descriptor", []) if d.id == id]
)
def as_data(self):
data = {}
for k, v in self.contents.items():
set_if_not_none(data, to_data(list_is_optional(v)), k)
return data
def __eq__(self, other):
if type(other) is type(self):
return self.contents == other.contents
else:
return False
    def __hash__(self):
        # self.contents is a dict and therefore unhashable; hash a stable
        # string form of its items instead so hashing does not raise TypeError
        return hash(str(sorted(self.contents.items(), key=lambda kv: str(kv[0]))))
class SimpleDescriptor(Descriptor, DescriptorBase):
def __init__(
self,
id: str = None,
text: str = None,
ref: str = None,
name: str = None,
*args,
**kwargs
):
super().__init__(*args, **kwargs)
self.contents["id"] = id
self.contents["text"] = text
self.contents["ref"] = ref
self.contents["name"] = name
class Idempotent(SimpleDescriptor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.contents["type"] = "idempotent"
class ReferencingDescriptor(SimpleDescriptor):
def __init__(self, ref: str, *args, **kwargs):
super().__init__(*args, **kwargs)
self.contents["ref"] = ref
class Safe(SimpleDescriptor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.contents["type"] = "safe"
class Semantic(SimpleDescriptor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.contents["type"] = "semantic"
class Unsafe(SimpleDescriptor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.contents["type"] = "unsafe"
| 27.453901 | 85 | 0.587703 | [
"MIT"
] | michalporeba/alps-py | alps/descriptors.py | 3,871 | Python |
import disnake
from disnake.ext import commands
# Define a simple View that persists between bot restarts
# In order a view to persist between restarts it needs to meet the following conditions:
# 1) The timeout of the View has to be set to None
# 2) Every item in the View has to have a custom_id set
# It is recommended that the custom_id be sufficiently unique to
# prevent conflicts with other buttons the bot sends.
# For this example the custom_id is prefixed with the name of the bot.
# Note that custom_ids can only be up to 100 characters long.
class PersistentView(disnake.ui.View):
def __init__(self):
super().__init__(timeout=None)
@disnake.ui.button(
label="Green", style=disnake.ButtonStyle.green, custom_id="persistent_view:green"
)
async def green(self, button: disnake.ui.Button, interaction: disnake.MessageInteraction):
await interaction.response.send_message("This is green.", ephemeral=True)
@disnake.ui.button(label="Red", style=disnake.ButtonStyle.red, custom_id="persistent_view:red")
async def red(self, button: disnake.ui.Button, interaction: disnake.MessageInteraction):
await interaction.response.send_message("This is red.", ephemeral=True)
@disnake.ui.button(
label="Grey", style=disnake.ButtonStyle.grey, custom_id="persistent_view:grey"
)
async def grey(self, button: disnake.ui.Button, interaction: disnake.MessageInteraction):
await interaction.response.send_message("This is grey.", ephemeral=True)
class PersistentViewBot(commands.Bot):
def __init__(self):
super().__init__(command_prefix=commands.when_mentioned)
self.persistent_views_added = False
async def on_ready(self):
if not self.persistent_views_added:
# Register the persistent view for listening here.
# Note that this does not send the view to any message.
# In order to do this you need to first send a message with the View, which is shown below.
# If you have the message_id you can also pass it as a keyword argument, but for this example
# we don't have one.
self.add_view(PersistentView())
self.persistent_views_added = True
print(f"Logged in as {self.user} (ID: {self.user.id})")
print("------")
bot = PersistentViewBot()
@bot.command()
@commands.is_owner()
async def prepare(ctx: commands.Context):
"""Starts a persistent view."""
# In order for a persistent view to be listened to, it needs to be sent to an actual message.
# Call this method once just to store it somewhere.
# In a more complicated program you might fetch the message_id from a database for use later.
# However this is outside of the scope of this simple example.
await ctx.send("What's your favourite colour?", view=PersistentView())
bot.run("token")
| 42.397059 | 105 | 0.712105 | [
"MIT"
] | Chromosomologist/disnake | examples/views/persistent.py | 2,883 | Python |
import glob
from itertools import chain
from os import path
import numpy as np
import torch.utils.data as data
import umsgpack
from PIL import Image
class ISSDataset(data.Dataset):
"""Instance segmentation dataset
This assumes the dataset to be formatted as defined in:
https://github.com/mapillary/seamseg/wiki/Dataset-format
Parameters
----------
root_dir : str
Path to the root directory of the dataset
split_name : str
Name of the split to load: this must correspond to one of the files in `root_dir/lst`
transform : callable
Transformer function applied to the loaded entries to prepare them for pytorch. This should be callable as
        `transform(img, msk, cat, iscrowd)`, where:
        - `img` is a PIL.Image with `mode="RGB"`, containing the RGB data
        - `msk` is a list of PIL.Image with `mode="L"`, containing the instance segmentation masks
        - `cat` is a list containing the instance id to class id mapping
        - `iscrowd` is a list of flags marking instances that should be treated as crowd regions
"""
_IMG_DIR = "img"
_MSK_DIR = "msk"
_LST_DIR = "lst"
_METADATA_FILE = "metadata.bin"
def __init__(self, root_dir, split_name, transform):
super(ISSDataset, self).__init__()
self.root_dir = root_dir
self.split_name = split_name
self.transform = transform
# Folders
self._img_dir = path.join(root_dir, ISSDataset._IMG_DIR)
self._msk_dir = path.join(root_dir, ISSDataset._MSK_DIR)
self._lst_dir = path.join(root_dir, ISSDataset._LST_DIR)
for d in self._img_dir, self._msk_dir, self._lst_dir:
if not path.isdir(d):
raise IOError("Dataset sub-folder {} does not exist".format(d))
# Load meta-data and split
self._meta, self._images = self._load_split()
def _load_split(self):
with open(path.join(self.root_dir, ISSDataset._METADATA_FILE), "rb") as fid:
metadata = umsgpack.unpack(fid, encoding="utf-8")
with open(path.join(self._lst_dir, self.split_name + ".txt"), "r") as fid:
lst = fid.readlines()
lst = set(line.strip() for line in lst)
meta = metadata["meta"]
images = [img_desc for img_desc in metadata["images"] if img_desc["id"] in lst]
return meta, images
def _load_item(self, item):
img_desc = self._images[item]
img_file = path.join(self._img_dir, img_desc["id"])
if path.exists(img_file + ".png"):
img_file = img_file + ".png"
elif path.exists(img_file + ".jpg"):
img_file = img_file + ".jpg"
else:
raise IOError("Cannot find any image for id {} in {}".format(img_desc["id"], self._img_dir))
img = Image.open(img_file).convert(mode="RGB")
# Load all masks
msk_file = path.join(self._msk_dir, img_desc["id"] + ".png")
msk = [Image.open(msk_file)]
i = 1
while path.exists("{}.{}".format(msk_file, i)):
msk.append(Image.open("{}.{}".format(msk_file, i)))
i += 1
cat = img_desc["cat"]
iscrowd = img_desc["iscrowd"]
return img, msk, cat, iscrowd, img_desc["id"]
@property
def categories(self):
"""Category names"""
return self._meta["categories"]
@property
def num_categories(self):
"""Number of categories"""
return len(self.categories)
@property
def num_stuff(self):
"""Number of "stuff" categories"""
return self._meta["num_stuff"]
@property
def num_thing(self):
"""Number of "thing" categories"""
return self.num_categories - self.num_stuff
@property
def original_ids(self):
"""Original class id of each category"""
return self._meta["original_ids"]
@property
def palette(self):
"""Default palette to be used when color-coding semantic labels"""
return np.array(self._meta["palette"], dtype=np.uint8)
@property
def img_sizes(self):
"""Size of each image of the dataset"""
return [img_desc["size"] for img_desc in self._images]
@property
def img_categories(self):
"""Categories present in each image of the dataset"""
return [img_desc["cat"] for img_desc in self._images]
def __len__(self):
return len(self._images)
def __getitem__(self, item):
img, msk, cat, iscrowd, idx = self._load_item(item)
rec = self.transform(img, msk, cat, iscrowd)
size = (img.size[1], img.size[0])
img.close()
for m in msk:
m.close()
rec["idx"] = idx
rec["size"] = size
return rec
def get_raw_image(self, idx):
"""Load a single, unmodified image with given id from the dataset"""
img_file = path.join(self._img_dir, idx)
if path.exists(img_file + ".png"):
img_file = img_file + ".png"
elif path.exists(img_file + ".jpg"):
img_file = img_file + ".jpg"
else:
raise IOError("Cannot find any image for id {} in {}".format(idx, self._img_dir))
return Image.open(img_file)
def get_image_desc(self, idx):
"""Look up an image descriptor given the id"""
matching = [img_desc for img_desc in self._images if img_desc["id"] == idx]
if len(matching) == 1:
return matching[0]
else:
raise ValueError("No image found with id %s" % idx)
class ISSTestDataset(data.Dataset):
_EXTENSIONS = ["*.jpg", "*.jpeg", "*.png"]
def __init__(self, in_dir, transform):
super(ISSTestDataset, self).__init__()
self.in_dir = in_dir
self.transform = transform
# Find all images
self._images = []
for img_path in chain(
*(glob.iglob(path.join(self.in_dir, '**', ext), recursive=True) for ext in ISSTestDataset._EXTENSIONS)):
_, name_with_ext = path.split(img_path)
idx, _ = path.splitext(name_with_ext)
with Image.open(img_path) as img_raw:
size = (img_raw.size[1], img_raw.size[0])
self._images.append({
"idx": idx,
"path": img_path,
"size": size,
})
@property
def img_sizes(self):
"""Size of each image of the dataset"""
return [img_desc["size"] for img_desc in self._images]
def __len__(self):
return len(self._images)
def __getitem__(self, item):
# Load image
with Image.open(self._images[item]["path"]) as img_raw:
size = (img_raw.size[1], img_raw.size[0])
img = self.transform(img_raw.convert(mode="RGB"))
return {
"img": img,
"idx": self._images[item]["idx"],
"size": size,
"abs_path": self._images[item]["path"],
"rel_path": path.relpath(self._images[item]["path"], self.in_dir),
}
| 33.140845 | 120 | 0.593285 | [
"BSD-3-Clause"
] | 030Solutions/seamseg | seamseg/data/dataset.py | 7,059 | Python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any
from azure.core.credentials import TokenCredential
VERSION = "unknown"
class MonitorClientConfiguration(Configuration):
"""Configuration for MonitorClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
"""
def __init__(
self,
credential, # type: "TokenCredential"
**kwargs # type: Any
):
# type: (...) -> None
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
super(MonitorClientConfiguration, self).__init__(**kwargs)
self.credential = credential
self.api_version = "2017-05-01-preview"
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-eventhub/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.BearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| 45.307692 | 129 | 0.681834 | [
"MIT"
] | BillmanH/azure-sdk-for-python | sdk/monitor/azure-mgmt-monitor/azure/mgmt/monitor/v2017_05_01_preview/_configuration.py | 2,945 | Python |
# -*- test-case-name: vumi.transports.xmpp.tests.test_xmpp -*-
# -*- encoding: utf-8 -*-
from twisted.python import log
from twisted.words.protocols.jabber.jid import JID
from twisted.words.xish import domish
from twisted.words.xish.domish import Element as DomishElement
from twisted.internet.task import LoopingCall
from twisted.internet.defer import inlineCallbacks
from wokkel.client import XMPPClient
from wokkel.ping import PingClientProtocol
from wokkel.xmppim import (RosterClientProtocol, MessageProtocol,
PresenceClientProtocol)
from vumi.transports.base import Transport
class TransportRosterClientProtocol(RosterClientProtocol):
def connectionInitialized(self):
# get the roster as soon as the connection's been initialized, this
# allows us to see who's online but more importantly, allows us to see
# who's added us to their roster. This allows us to auto subscribe to
# anyone, automatically adding them to our roster, skips the "user ...
# wants to add you to their roster, allow? yes/no" hoopla.
self.getRoster()
class TransportPresenceClientProtocol(PresenceClientProtocol):
"""
A custom presence protocol to automatically accept any subscription
attempt.
"""
def __init__(self, initialized_callback, *args, **kwargs):
super(TransportPresenceClientProtocol, self).__init__(*args, **kwargs)
self.initialized_callback = initialized_callback
def connectionInitialized(self):
super(TransportPresenceClientProtocol, self).connectionInitialized()
self.initialized_callback()
def subscribeReceived(self, entity):
self.subscribe(entity)
self.subscribed(entity)
def unsubscribeReceived(self, entity):
self.unsubscribe(entity)
self.unsubscribed(entity)
class XMPPTransportProtocol(MessageProtocol, object):
def __init__(self, jid, message_callback, connection_callback,
connection_lost_callback=None,):
super(MessageProtocol, self).__init__()
self.jid = jid
self.message_callback = message_callback
self.connection_callback = connection_callback
self.connection_lost_callback = connection_lost_callback
def reply(self, jid, content):
message = domish.Element((None, "message"))
        # intentionally leaving 'from' blank, leaving it to the XMPP server
        # to figure out
message['to'] = jid
message['type'] = 'chat'
message.addUniqueId()
message.addElement((None, 'body'), content=content)
self.xmlstream.send(message)
def onMessage(self, message):
"""Messages sent to the bot will arrive here. Command handling routing
is done in this function."""
if not isinstance(message.body, DomishElement):
return None
text = unicode(message.body).encode('utf-8').strip()
from_addr, _, _ = message['from'].partition('/')
self.message_callback(
to_addr=self.jid.userhost(),
from_addr=from_addr,
content=text,
transport_type='xmpp',
transport_metadata={
'xmpp_id': message.getAttribute('id'),
})
def connectionMade(self):
self.connection_callback()
return super(XMPPTransportProtocol, self).connectionMade()
def connectionLost(self, reason):
if self.connection_lost_callback is not None:
self.connection_lost_callback(reason)
log.msg("XMPP Connection lost.")
super(XMPPTransportProtocol, self).connectionLost(reason)
class XMPPTransport(Transport):
"""XMPP transport.
Configuration parameters:
:type host: str
:param host:
The host of the XMPP server to connect to.
:type port: int
:param port:
The port on the XMPP host to connect to.
:type debug: bool
:param debug:
Whether or not to show all the XMPP traffic. Defaults to False.
:type username: str
:param username:
The XMPP account username
:type password: str
:param password:
The XMPP account password
:type status: str
:param status:
The XMPP status 'away', 'xa', 'chat' or 'dnd'
:type status_message: str
:param status_message:
The natural language status message for this XMPP transport.
:type presence_interval: int
:param presence_interval:
How often (in seconds) to send a presence update to the roster.
:type ping_interval: int
:param ping_interval:
How often (in seconds) to send a keep-alive ping to the XMPP server
to keep the connection alive. Defaults to 60 seconds.
"""
start_message_consumer = False
_xmpp_protocol = XMPPTransportProtocol
_xmpp_client = XMPPClient
def __init__(self, options, config=None):
super(XMPPTransport, self).__init__(options, config=config)
self.ping_call = LoopingCall(self.send_ping)
self.presence_call = LoopingCall(self.send_presence)
def validate_config(self):
self.host = self.config['host']
self.port = int(self.config['port'])
self.debug = self.config.get('debug', False)
self.username = self.config['username']
self.password = self.config['password']
self.status = self.config['status']
self.status_message = self.config.get('status_message', '')
self.ping_interval = self.config.get('ping_interval', 60)
self.presence_interval = self.config.get('presence_interval', 60)
def setup_transport(self):
log.msg("Starting XMPPTransport: %s" % self.transport_name)
self.jid = JID(self.username)
self.xmpp_client = self._xmpp_client(self.jid, self.password,
self.host, self.port)
self.xmpp_client.logTraffic = self.debug
self.xmpp_client.setServiceParent(self)
self.presence = TransportPresenceClientProtocol(self.announce_presence)
self.presence.setHandlerParent(self.xmpp_client)
self.pinger = PingClientProtocol()
self.pinger.setHandlerParent(self.xmpp_client)
self.ping_call.start(self.ping_interval, now=False)
roster = TransportRosterClientProtocol()
roster.setHandlerParent(self.xmpp_client)
self.xmpp_protocol = self._xmpp_protocol(
self.jid, self.publish_message, self.message_consumer.unpause)
self.xmpp_protocol.setHandlerParent(self.xmpp_client)
log.msg("XMPPTransport %s started." % self.transport_name)
def announce_presence(self):
if not self.presence_call.running:
self.presence_call.start(self.presence_interval)
@inlineCallbacks
def send_ping(self):
if self.xmpp_client.xmlstream:
yield self.pinger.ping(self.jid)
def send_presence(self):
if self.xmpp_client.xmlstream:
self.presence.available(statuses={
None: self.status})
def teardown_transport(self):
log.msg("XMPPTransport %s stopped." % self.transport_name)
ping_call = getattr(self, 'ping_call', None)
if ping_call and ping_call.running:
ping_call.stop()
presence_call = getattr(self, 'presence_call', None)
if presence_call and presence_call.running:
presence_call.stop()
def handle_outbound_message(self, message):
recipient = message['to_addr']
text = message['content']
jid = JID(recipient).userhost()
if not self.xmpp_protocol.xmlstream:
log.err("Outbound undeliverable, XMPP not initialized yet.")
return False
else:
self.xmpp_protocol.reply(jid, text)
| 36.474178 | 79 | 0.671644 | [
"BSD-3-Clause"
] | rapidsms/vumi | vumi/transports/xmpp/xmpp.py | 7,769 | Python |
from fake_useragent import UserAgent
import requests
from jsonpath import jsonpath
url = "https://www.lagou.com/lbs/getAllCitySearchLabels.json"
headers = {"User-Agent": UserAgent().chrome}
resp = requests.get(url, headers=headers)
ids = jsonpath(resp.json(), "$..id")
names = jsonpath(resp.json(), "$..name")
for id, name in zip(ids, names):
print(id, ":", name)
| 23.3125 | 61 | 0.705094 | [
"MIT"
] | littleturings/2021PythonWebCrawler | Lecture_notes/数据提取与验证码的识别(上)/code/jsonpath_test.py | 373 | Python |
# /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module is used for the GUI of Lisa
"""
from loguru import logger
import sys
import click
from pathlib import Path
import ast
from . import app_tools
# print("start")
# from . import image
# print("start 5")
# print("start 6")
# from scaffan import algorithm
from . import algorithm
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
# print("Running __main__.py")
# @batch_detect.command(context_settings=CONTEXT_SETTINGS)
# @click.argument("image_stack_dir", type=click.Path(exists=True))
# @click.argument("working_dir", type=click.Path())
# @click.option("--create-icon", is_flag=True,
# help="Create desktop icon"
# )
@click.group(context_settings=CONTEXT_SETTINGS, invoke_without_command=True)
@click.pass_context
def run(ctx, *args, **kwargs):
if ctx.invoked_subcommand is None:
# click.echo('I was invoked without subcommand')
ctx.invoke(gui, *args, **kwargs)
# a.main()
else:
pass
click.echo("I am about to invoke %s" % ctx.invoked_subcommand)
pass
# @run.command(context_settings=CONTEXT_SETTINGS, help="Set persistent values")
# @click.option("--common-spreadsheet-file", help="Set path for common spreadsheet file.", type=click.Path())
# def set(common_spreadsheet_file=None):
# mainapp = algorithm.AnimalWatch()
# if common_spreadsheet_file is not None:
# mainapp.set_common_spreadsheet_file(path=common_spreadsheet_file)
# logger.info(f"Common spreadsheet file path is : {common_spreadsheet_file}")
# print(f"Common spreadsheet file path is : {common_spreadsheet_file}")
# def print_params(params):
# algorithm.Scaffan().parameters.
# params.
@run.command(context_settings=CONTEXT_SETTINGS)
@click.option(
"--params",
"-p",
multiple=True,
default=None,
nargs=2,
help='Set parameter. First argument is path to parameter separated by ";". Second is the value.'
"python -m scaffan gui -p Processing;Show True",
)
@click.option("--print-params", "-pp", is_flag=True, help="Print parameters")
def gui(params, print_params):
mainapp = algorithm.AnimalWatch()
if print_params:
make_print_params(mainapp)
exit()
# mainapp.parameters.param(*param[0].split(";")).setValue(ast.literal_eval(param[1]))
set_params(mainapp, params)
mainapp.start_gui()
def set_params(mainapp, params):
if params is not None:
logger.debug("set_params() ...")
app_tools.set_parameters_by_path(mainapp.parameters, params)
# for param in params:
# mainapp.set_parameter(param[0], value=ast.literal_eval(param[1]))
def make_print_params(mainapp):
import pprint
pprint.pprint(mainapp.parameters_to_dict())
@run.command(
context_settings=CONTEXT_SETTINGS, help="Create an icon on Windows platform"
)
def install():
from .app_tools import create_icon
icon_filename = Path(__file__).parent / Path("anwa.ico")
create_icon("anwa", icon_filename, conda_env_name="anwa_app")
# print(platform.system)
# if platform.system() == "Windows":
# import pathlib
# pass
@run.command(context_settings=CONTEXT_SETTINGS)
@click.option(
"--input-path",
"-i",
type=click.Path(exists=True),
help='Path to input directory with video files.',
)
@click.option(
"--params",
"-p",
multiple=True,
default=None,
nargs=2,
help='Set parameter. First argument is path to parameter separated by ";". Second is the value.'
"python -m anwa nogui -p Processing;Show True",
)
@click.option("--print-params", "-pp", is_flag=True, help="Print parameters")
def nogui(input_path, params, print_params):
mainapp = algorithm.AnimalWatch()
logger.debug(f"params={params})")
if print_params:
make_print_params(mainapp)
exit()
set_params(mainapp, params)
# for param in params:
# mainapp.parameters.param(*param[0].split(";")).setValue(
# ast.literal_eval(param[1])
# )
mainapp.set_input_dir(input_path)
# mainapp.start_gui()
mainapp.run()
# def install():
| 28.634483 | 110 | 0.678227 | [
"MIT"
] | mjirik/animalwatch | anwa/main_click.py | 4,152 | Python |
'''
Author: Gurkirt Singh
Start date: 2nd May 2016
purpose of this file is to take all .mp4 videos and convert them to jpg images
'''
import numpy as np
import cv2 as cv2
import math,pickle,shutil,os
baseDir = "/mnt/sun-alpha/actnet/";
vidDir = "/mnt/earth-beta/actnet/videos/";
imgDir = "/mnt/sun-alpha/actnet/rgb-images/";
annotPklFile = "../Evaluation/data/actNet200-V1-3.pkl"
#os.mkdir(imgDir)
annotFile = "../anetv13.json"
def getAnnotations():
with open(annotFile) as f:
annoData = json.load(f)
taxonomy = annoData["taxonomy"]
version = annoData["version"]
database = annoData["database"]
print len(database),version,len(taxonomy)
def getNumFrames(filename):
cap = cv2.VideoCapture(filename)
if not cap.isOpened():
print "could not open :",filename
return -1
numf = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
return numf
def getVidedInfo(filename):
try:
cap = cv2.VideoCapture(filename)
except cv2.error as e:
print e
return 0,0,0,0,-1
if not cap.isOpened():
print "could not open :",filename
return 0,0,0,0,-1
numf = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT))
width = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
return numf,width,height,fps,cap
def getsmallestDimto256(width,height):
if width>=height:
newH = 256
newW = int(math.ceil((float(newH)/height)*width))
else:
newW = 256
newH = int(math.ceil((float(newW)/width)*height))
return newW,newH
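# Example (illustrative): getsmallestDimto256(1280, 720) -> (456, 256), i.e. the
# smaller dimension is scaled to 256 and the other keeps the aspect ratio.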
def getframelabels(annotations,numf):
framelabels = np.ones(numf,dtype='uint16')*200;
for annot in annotations:
actionId = annot['class']
startframe = annot['sf']
endframe = annot['ef']
framelabels[startframe:endframe] = int(actionId)-1
return framelabels
def movefiles(storageDir,framelabels,numfs):
dst = ''
for ind in range(numfs):
label = framelabels[ind]
src = storageDir+str(ind).zfill(5)+".jpg"
dst = storageDir+str(ind).zfill(5)+'-ActId'+str(label).zfill(3)+'.jpg'
shutil.move(src,dst)
print dst ,' MOVED '
def convertVideosL():
print "this is convertVideos function with labels"
ecount = 0
with open(annotPklFile,'rb') as f:
actNetDB = pickle.load(f)
actionIDs = actNetDB['actionIDs']; taxonomy=actNetDB['taxonomy']; database = actNetDB['database'];
for videoId in reversed(database.keys()):
ecount+=1
if ecount>0:
videoInfo = database[videoId]
storageDir = imgDir+'v_'+videoId+"/"
print videoInfo['subset']
if not videoInfo['isnull'] and not videoInfo['subset'] == 'testing':
videoname = vidDir+'v_'+videoId+'.mp4'
if not os.path.isfile(videoname):
videoname = vidDir+'v_'+videoId+'.mkv'
print storageDir,' ecount ',ecount,videoInfo['subset']
numfs = videoInfo['numf']
annotations = videoInfo['annotations']
framelabels = getframelabels(annotations,numfs)
imgname = storageDir+str(numfs-1).zfill(5)+".jpg"
if os.path.isfile(imgname):
movefiles(storageDir,framelabels,numfs)
else:
dst = storageDir+str(numfs-1).zfill(5)+'-ActId'+str(framelabels[-1]).zfill(3)+'.jpg'
if not os.path.isfile(dst):
numf,width,height,fps,cap = getVidedInfo(videoname)
if not cap == -1 and numf == numfs:
newW=256;newH=256;
framecount = 0;
if cap.isOpened():
if not os.path.isdir(storageDir):
os.mkdir(storageDir)
for ind in xrange(numf):
label = framelabels[ind]
dst = storageDir+str(ind).zfill(5)+'-ActId'+str(label).zfill(3)+'.jpg'
retval,image = cap.read()
if not image is None:
resizedImage = cv2.resize(image,(newW,newH))
cv2.imwrite(dst,resizedImage)
else:
cv2.imwrite(dst,resizedImage)
print ' . ',
print dst , 'is created'
else:
with open('vids/'+videoId+'.txt','wb') as f:
f.write('error')
else:
print dst , 'is already there'
def convertTestVideos():
print "this is convertVideos function with labels"
ecount = 0
with open(annotPklFile,'rb') as f:
actNetDB = pickle.load(f)
actionIDs = actNetDB['actionIDs']; taxonomy=actNetDB['taxonomy']; database = actNetDB['database'];
for videoId in database.keys():
ecount+=1
if ecount>0:
videoInfo = database[videoId]
storageDir = imgDir+'v_'+videoId+"/"
print videoInfo['subset']
if not videoInfo['isnull'] and videoInfo['subset'] == 'testing':
videoname = vidDir+'v_'+videoId+'.mp4'
if not os.path.isfile(videoname):
videoname = vidDir+'v_'+videoId+'.mkv'
print storageDir,' ecount ',ecount,videoInfo['subset']
numfs = videoInfo['numf']
# annotations = videoInfo['annotations']
framelabels = np.ones(numfs,dtype='uint16')*200;
imgname = storageDir+str(numfs-1).zfill(5)+".jpg"
if os.path.isfile(imgname):
movefiles(storageDir,framelabels,numfs)
else:
dst = storageDir+str(numfs-1).zfill(5)+'-ActId'+str(framelabels[-1]).zfill(3)+'.jpg'
if not os.path.isfile(dst):
numf,width,height,fps,cap = getVidedInfo(videoname)
if not cap == -1 and numf == numfs:
newW=256;newH=256;
framecount = 0;
if cap.isOpened():
if not os.path.isdir(storageDir):
os.mkdir(storageDir)
for ind in xrange(numf):
label = framelabels[ind]
dst = storageDir+str(ind).zfill(5)+'-ActId'+str(label).zfill(3)+'.jpg'
retval,image = cap.read()
if not image is None:
resizedImage = cv2.resize(image,(newW,newH))
cv2.imwrite(dst,resizedImage)
else:
cv2.imwrite(dst,resizedImage)
print ' . ',
print dst , 'is created'
else:
with open('vids/'+videoId+'.txt','wb') as f:
f.write('error')
else:
print dst , 'is already there'
def convertVideos():
print "this is convertVideos function"
## vidDir = vidDirtemp
vidlist = os.listdir(vidDir)
vidlist = [vid for vid in vidlist if vid.startswith("v_")]
print "Number of sucessfully donwloaded ",len(vidlist)
vcount =0
for videname in reversed(vidlist):
vcount+=1
if vcount>0:
src = vidDir+videname
numf,width,height,fps,cap = getVidedInfo(src)
if not cap == -1:
newW=256;newH=256;
print videname, width,height,' and newer are ',newW,newH, ' fps ',fps,' numf ', numf, ' vcount ',vcount
framecount = 0;
storageDir = imgDir+videname.split('.')[0]+"/"
imgname = storageDir+str(numf-1).zfill(5)+".jpg"
if not os.path.isfile(imgname):
if cap.isOpened():
if not os.path.isdir(storageDir):
os.mkdir(storageDir)
for f in xrange(numf):
retval,image = cap.read()
if not image is None:
# print np.shape(retval),np.shape(image), type(image),f
resizedImage = cv2.resize(image,(newW,newH))
imgname = storageDir+str(framecount).zfill(5)+".jpg"
cv2.imwrite(imgname,resizedImage)
else:
imgname = storageDir+str(framecount).zfill(5)+".jpg"
cv2.imwrite(imgname,resizedImage)
print 'we have missing frame ',framecount
framecount+=1
print imgname
else:
with open('vids/'+videname.split('.')[0]+'.txt','wb') as f:
f.write('error')
def getframelabels4both(annotations,numf,subset):
framelabels = np.ones(numf,dtype='uint16')*200;
if subset == 'testing':
return framelabels
for annot in annotations:
actionId = annot['class']
startframe = annot['sf']
endframe = annot['ef']
framelabels[startframe:endframe] = int(actionId)-1
return framelabels
def genVideoImageLists():
subset = 'testing'
print "this is genVideoImageLists function"
ecount = 0; vcount = 0;
listname = '{}lists/{}.list'.format(baseDir,subset)
fid = open(listname,'wb')
with open(annotPklFile,'rb') as f:
actNetDB = pickle.load(f)
actionIDs = actNetDB['actionIDs']; taxonomy=actNetDB['taxonomy']; database = actNetDB['database'];
for videoId in database.keys():
ecount+=1
if ecount>0:
videoInfo = database[videoId]
if not videoInfo['isnull'] and videoInfo['subset'] == subset:
vcount+=1
storageDir = imgDir+'v_'+videoId+"/"
videlistName = '{}lists/{}/v_{}.list'.format(baseDir,subset,videoId)
fid.write(videlistName+'\n');
vfid = open(videlistName,'wb');
print storageDir,' ecount ',ecount,videoInfo['subset']
numfs = videoInfo['numf']
annotations = videoInfo['annotations']
framelabels = getframelabels4both(annotations,numfs,subset)
dst = storageDir+str(numfs-1).zfill(5)+'-ActId'+str(framelabels[-1]).zfill(3)+'.jpg'
if os.path.isfile(dst):
for ind in xrange(numfs):
label = framelabels[ind]
dst = storageDir+str(ind).zfill(5)+'-ActId'+str(label).zfill(3)+'.jpg'
vfid.write(dst+'\n')
else:
RuntimeError('check if file exists '+dst)
def checkConverted():
print "this is checkConverted videos function"
vidlist = os.listdir(vidDir)
vidlist = [vid for vid in vidlist if vid.endswith(".mp4")]
print "Number of sucessfully donwloaded ",len(vidlist)
vcount =0
for videname in vidlist[15000:]:
src = vidDir+videname
numF = getNumFrames(src)
if numF>0:
imgname = imgDir+videname.split('.')[0]+"/"+str(numF-1).zfill(5)+".jpg"
print 'last frame is ',imgname,' vocunt ',vcount
vcount+=1
dst = vidDirtemp+videname
if not os.path.isfile(imgname):
shutil.move(src,dst)
print " moved this one to ", dst
if __name__=="__main__":
# checkConverted()
# convertVideosL()
# convertTestVideos()
genVideoImageLists()
| 42.851211 | 120 | 0.505087 | [
"MIT"
] | gurkirt/actNet-inAct | python-scripts/convertMP4toJPG.py | 12,384 | Python |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for conversion module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph import utils
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.impl import api
from tensorflow.python.autograph.impl import conversion
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.framework import constant_op
from tensorflow.python.keras.engine import training
from tensorflow.python.platform import test
class ConversionTest(test.TestCase):
def _simple_program_ctx(self):
return converter.ProgramContext(
options=converter.ConversionOptions(recursive=True),
autograph_module=api)
def test_is_whitelisted_for_graph(self):
def test_fn():
return constant_op.constant(1)
self.assertFalse(conversion.is_whitelisted_for_graph(test_fn))
self.assertTrue(conversion.is_whitelisted_for_graph(utils))
self.assertTrue(conversion.is_whitelisted_for_graph(constant_op.constant))
def test_entity_to_graph_unsupported_types(self):
with self.assertRaises(NotImplementedError):
program_ctx = self._simple_program_ctx()
conversion.entity_to_graph('dummy', program_ctx, None, None)
def test_entity_to_graph_callable(self):
b = 2
def f(a):
return a + b
program_ctx = self._simple_program_ctx()
nodes, name, ns = conversion.entity_to_graph(f, program_ctx, None, None)
fn_node, _ = nodes
self.assertIsInstance(fn_node, gast.FunctionDef)
    self.assertEqual('tf__f', name)
    self.assertIs(ns['b'], b)

  def test_entity_to_graph_function_with_defaults(self):
    b = 2
    c = 1

    def f(a, d=c + 1):
      return a + b + d

    program_ctx = self._simple_program_ctx()
    nodes, name, _ = conversion.entity_to_graph(f, program_ctx, None, None)
    fn_node, _ = nodes
    self.assertIsInstance(fn_node, gast.FunctionDef)
    self.assertEqual('tf__f', name)
    self.assertEqual(
        compiler.ast_to_source(fn_node.args.defaults[0]).strip(), 'None')

  def test_entity_to_graph_call_tree(self):

    def g(a):
      return a

    def f(a):
      return g(a)

    program_ctx = self._simple_program_ctx()
    nodes, _, _ = conversion.entity_to_graph(f, program_ctx, None, None)
    f_node = nodes[0]
    self.assertEqual('tf__f', f_node.name)

  def test_entity_to_graph_class_hierarchy(self):

    class TestBase(object):

      def __init__(self, x='base'):
        self.x = x

      def foo(self):
        return self.x

      def bar(self):
        return self.x

    class TestSubclass(TestBase):

      def __init__(self, y):
        super(TestSubclass, self).__init__('sub')
        self.y = y

      def foo(self):
        return self.y

      def baz(self):
        return self.y

    program_ctx = self._simple_program_ctx()
    with self.assertRaisesRegex(NotImplementedError, 'classes.*whitelisted'):
      conversion.entity_to_graph(TestSubclass, program_ctx, None, None)

  def test_entity_to_graph_class_hierarchy_whitelisted(self):

    class TestSubclass(training.Model):

      def __init__(self, y):
        super(TestSubclass, self).__init__()
        self.built = False

      def call(self, x):
        return 3 * x

    program_ctx = self._simple_program_ctx()
    nodes, name, _ = conversion.entity_to_graph(TestSubclass, program_ctx, None,
                                                None)
    class_node = nodes[-2]  # TODO(mdan): This is brittle.
    self.assertEqual(name, 'TfTestSubclass')
    self.assertEqual(class_node.name, 'TfTestSubclass')

  def test_entity_to_graph_lambda(self):
    b = 2
    f = lambda x: b * x if x > 0 else -x

    program_ctx = self._simple_program_ctx()
    nodes, name, ns = conversion.entity_to_graph(f, program_ctx, None, None)
    fn_node, _ = nodes
    self.assertIsInstance(fn_node, gast.Assign)
    self.assertIsInstance(fn_node.value, gast.Lambda)
    self.assertEqual('tf__lambda', name)
    self.assertIs(ns['b'], b)

  def test_entity_to_graph_multiple_lambdas(self):
    a, b = 1, 2
    f, _ = (lambda x: a * x, lambda y: b * y)

    program_ctx = self._simple_program_ctx()
    nodes, name, ns = conversion.entity_to_graph(f, program_ctx, None, None)
    fn_node, _ = nodes
    self.assertIsInstance(fn_node, gast.Assign)
    self.assertIsInstance(fn_node.value, gast.Lambda)
    self.assertEqual('tf__lambda', name)
    self.assertIs(ns['a'], a)

  def test_entity_to_graph_multiple_lambdas_ambiguous_definitions(self):
    a, b = 1, 2
    f, _ = (lambda x: a * x, lambda x: b * x)

    program_ctx = self._simple_program_ctx()
    with self.assertRaises(ValueError):
      conversion.entity_to_graph(f, program_ctx, None, None)

  def test_entity_to_graph_lambda_code_with_garbage(self):
    # pylint:disable=g-long-lambda
    f = (  # intentional wrap
        lambda x: (x  # intentional wrap
                   + 1),)[0]
    # pylint:enable=g-long-lambda

    program_ctx = self._simple_program_ctx()
    nodes, name, _ = conversion.entity_to_graph(f, program_ctx, None, None)
    fn_node, _ = nodes
    self.assertIsInstance(fn_node, gast.Assign)
    self.assertIsInstance(fn_node.value, gast.Lambda)
    self.assertEqual('tf__lambda', name)

  def test_entity_to_graph_nested_functions(self):
    b = 2

    def f(x):

      def g(x):
        return b * x

      return g(x)

    program_ctx = self._simple_program_ctx()
    nodes, name, ns = conversion.entity_to_graph(f, program_ctx, None, None)
    fn_node, _ = nodes
    self.assertIsInstance(fn_node, gast.FunctionDef)
    self.assertEqual(fn_node.name, 'tf__f')
    self.assertEqual('tf__f', name)
    self.assertIs(ns['b'], b)

  def test_ag_module_cached(self):

    def callee():
      return range(3)

    def caller(a):
      return a()

    program_ctx = self._simple_program_ctx()
    _, _, callee_ns = conversion.entity_to_graph(callee, program_ctx, None,
                                                 None)
    _, _, caller_ns = conversion.entity_to_graph(caller, program_ctx, None,
                                                 None)

    self.assertTrue(callee_ns['ag__'] is caller_ns['ag__'])
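
  # Note (not part of the original test file): all of the cases above exercise
  # the same entry point, `conversion.entity_to_graph(entity, program_ctx, ...)`,
  # which returns a `(nodes, new_name, namespace)` tuple; the two trailing
  # arguments (argument value/type hints) are left as `None` because these
  # tests do not specialize on argument values or types.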

if __name__ == '__main__':
  test.main()
| 31.361991 | 80 | 0.68273 | ["Apache-2.0"] | GueroudjiAmal/tensorflow | tensorflow/python/autograph/impl/conversion_test.py | 6,931 | Python |
from distutils.core import setup

setup(name='bidict',
      version='0.1',
      description='A bi-directional dictionary API',
      author='Jordan Epstein',
      author_email='jorepstein1@gmail.com',
      url='https://github.com/jorepstein1/bidict',
      packages=['bidict'],
      )

| 28.8 | 52 | 0.652778 | ["MIT"] | jorepstein1/bidict | setup.py | 288 | Python |
# Copyright 2016-2021 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
#
# Meta-class for creating regression tests.
#

import functools
import types

import reframe.core.namespaces as namespaces
import reframe.core.parameters as parameters
import reframe.core.variables as variables
import reframe.core.hooks as hooks
import reframe.utility as utils
from reframe.core.exceptions import ReframeSyntaxError
from reframe.core.deferrable import deferrable, _DeferredPerformanceExpression


class RegressionTestMeta(type):

    class MetaNamespace(namespaces.LocalNamespace):
        '''Custom namespace to control the cls attribute assignment.

        Regular Python class attributes can be overridden by either
        parameters or variables, respecting the order of execution.
        A variable or a parameter may not be declared more than once in the
        same class body. Overriding a variable with a parameter or the other
        way around has undefined behavior. A variable's value may be
        updated multiple times within the same class body. A parameter's
        value cannot be updated more than once within the same class body.
        '''

        def __setitem__(self, key, value):
            if isinstance(value, variables.TestVar):
                # Insert the attribute in the variable namespace
                try:
                    self['_rfm_local_var_space'][key] = value
                    value.__set_name__(self, key)
                except KeyError:
                    raise ReframeSyntaxError(
                        f'variable {key!r} is already declared'
                    ) from None

                # Override the regular class attribute (if present) and return
                self._namespace.pop(key, None)
                return

            elif isinstance(value, parameters.TestParam):
                # Insert the attribute in the parameter namespace
                try:
                    self['_rfm_local_param_space'][key] = value
                except KeyError:
                    raise ReframeSyntaxError(
                        f'parameter {key!r} is already declared in this class'
                    ) from None

                # Override the regular class attribute (if present) and return
                self._namespace.pop(key, None)
                return

            elif key in self['_rfm_local_param_space']:
                raise ReframeSyntaxError(
                    f'cannot override parameter {key!r}'
                )
            else:
                # Insert the items manually to override the namespace clash
                # check from the base namespace.
                self._namespace[key] = value

            # Register functions decorated with either @sanity_function or
            # @performance_variables or @performance_function decorators.
            if hasattr(value, '_rfm_sanity_fn'):
                try:
                    super().__setitem__('_rfm_sanity', value)
                except KeyError:
                    raise ReframeSyntaxError(
                        'the @sanity_function decorator can only be used '
                        'once in the class body'
                    ) from None
            elif hasattr(value, '_rfm_perf_key'):
                try:
                    self['_rfm_perf_fns'][key] = value
                except KeyError:
                    raise ReframeSyntaxError(
                        f'the performance function {key!r} has already been '
                        f'defined in this class'
                    ) from None

            # Register the final methods
            if hasattr(value, '_rfm_final'):
                self['_rfm_final_methods'].add(key)

            # Register the hooks; if a value does not meet the conditions,
            # it will simply be ignored.
            self['_rfm_hook_registry'].add(value)

        def __getitem__(self, key):
            '''Expose and control access to the local namespaces.

            Variables may only be retrieved if their value has been previously
            set. Accessing a parameter in the class body is disallowed (the
            actual test parameter is set during the class instantiation).
            '''
            try:
                return super().__getitem__(key)
            except KeyError as err:
                try:
                    # Handle variable access
                    return self['_rfm_local_var_space'][key]
                except KeyError:
                    # Handle parameter access
                    if key in self['_rfm_local_param_space']:
                        raise ReframeSyntaxError(
                            'accessing a test parameter from the class '
                            'body is disallowed'
                        ) from None
                    else:
                        # As a last resort, look if key is a variable in
                        # any of the base classes. If so, make its value
                        # available in the current class' namespace.
                        for b in self['_rfm_bases']:
                            if key in b._rfm_var_space:
                                # Store a deep-copy of the variable's
                                # value and return.
                                v = b._rfm_var_space[key].default_value
                                self._namespace[key] = v
                                return self._namespace[key]

                        # If 'key' is neither a variable nor a parameter,
                        # raise the exception from the base __getitem__.
                        raise err from None

        def reset(self, key):
            '''Reset an item to rerun it through the __setitem__ logic.'''
            self[key] = self[key]
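
        # Illustrative sketch (not part of the original module): the rules
        # above translate to the following behavior in a user test class body,
        # assuming `rfm.RegressionTest` is a class built with this metaclass:
        #
        #   class MyTest(rfm.RegressionTest):
        #       x = variable(int, value=1)   # declares variable 'x'
        #       x = 2                        # OK: updates the value of 'x'
        #       x = variable(int, value=3)   # error: 'x' already declared
        #
        #       p = parameter([1, 2, 3])     # declares parameter 'p'
        #       p = 4                        # error: cannot override 'p'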
    class WrappedFunction:
        '''Descriptor to wrap a free function as a bound-method.

        The free function object is wrapped by the constructor. Instances
        of this class should be inserted into the namespace of the target
        class with the desired name for the bound-method. Since this class is
        a descriptor, the `__get__` method will return the right bound-method
        when accessed from a class instance.

        :meta private:
        '''

        __slots__ = ('fn')

        def __init__(self, fn, name=None):
            @functools.wraps(fn)
            def _fn(*args, **kwargs):
                return fn(*args, **kwargs)

            self.fn = _fn
            if name:
                self.fn.__name__ = name

        def __get__(self, obj, objtype=None):
            if objtype is None:
                objtype = type(obj)

            self.fn.__qualname__ = '.'.join(
                [objtype.__qualname__, self.fn.__name__]
            )
            if obj is None:
                return self.fn

            return types.MethodType(self.fn, obj)

        def __call__(self, *args, **kwargs):
            return self.fn(*args, **kwargs)

        def __getattr__(self, name):
            if name in self.__slots__:
                return super().__getattr__(name)
            else:
                return getattr(self.fn, name)

        def __setattr__(self, name, value):
            if name in self.__slots__:
                super().__setattr__(name, value)
            else:
                setattr(self.fn, name, value)

    @classmethod
    def __prepare__(metacls, name, bases, **kwargs):
        namespace = super().__prepare__(name, bases, **kwargs)

        # Keep reference to the bases inside the namespace
        namespace['_rfm_bases'] = [
            b for b in bases if hasattr(b, '_rfm_var_space')
        ]

        # Regression test parameter space defined at the class level
        local_param_space = namespaces.LocalNamespace()
        namespace['_rfm_local_param_space'] = local_param_space

        # Directive to insert a regression test parameter directly in the
        # class body as: `P0 = parameter([0,1,2,3])`.
        namespace['parameter'] = parameters.TestParam

        # Regression test var space defined at the class level
        local_var_space = namespaces.LocalNamespace()
        namespace['_rfm_local_var_space'] = local_var_space

        # Directives to add/modify a regression test variable
        namespace['variable'] = variables.TestVar
        namespace['required'] = variables.Undefined
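
        # Illustrative sketch (not part of the original module; names are
        # hypothetical): the directives registered above appear as plain names
        # inside the class body of a test, e.g.
        #
        #   class MyTest(rfm.RegressionTest):
        #       elems = variable(int, value=10)   # becomes a TestVar
        #       mode = parameter(['a', 'b'])      # becomes a TestParam
        #       descr = required                  # inherited variable must
        #                                         # now be set by the user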
        # Utility decorators
        namespace['_rfm_ext_bound'] = set()

        def bind(fn, name=None):
            '''Directive to bind a free function to a class.

            See online docs for more information.

            .. note::
               Functions bound using this directive must be re-inspected after
               the class body execution has completed. This directive attaches
               the external method into the class namespace and returns the
               associated instance of the :class:`WrappedFunction`. However,
               this instance may be further modified by other ReFrame builtins
               such as :func:`run_before`, :func:`run_after`, :func:`final`
               and so on after it was added to the namespace, which would
               bypass the logic implemented in the :func:`__setitem__` method
               from the :class:`MetaNamespace` class. Hence, we track the
               items set by this directive in the ``_rfm_ext_bound`` set, so
               they can be later re-inspected.
            '''
            inst = metacls.WrappedFunction(fn, name)
            namespace[inst.__name__] = inst

            # Track the imported external functions
            namespace['_rfm_ext_bound'].add(inst.__name__)
            return inst
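
        # Hypothetical usage sketch (not part of the original module): binding
        # an externally defined function into a test class body. The directive
        # inserts the wrapped function directly into the class namespace under
        # the given name:
        #
        #   def set_message(self):
        #       self.executable_opts = ['hello']
        #
        #   class MyTest(rfm.RunOnlyRegressionTest):
        #       bind(set_message, name='prepare')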
        def final(fn):
            '''Indicate that a function is final and cannot be overridden.'''
            fn._rfm_final = True
            return fn

        namespace['bind'] = bind
        namespace['final'] = final
        namespace['_rfm_final_methods'] = set()

        # Hook-related functionality
        def run_before(stage):
            '''Decorator for attaching a test method to a given stage.

            See online docs for more information.
            '''
            return hooks.attach_to('pre_' + stage)

        def run_after(stage):
            '''Decorator for attaching a test method to a given stage.

            See online docs for more information.
            '''
            return hooks.attach_to('post_' + stage)

        namespace['run_before'] = run_before
        namespace['run_after'] = run_after
        namespace['require_deps'] = hooks.require_deps
        namespace['_rfm_hook_registry'] = hooks.HookRegistry()
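
        # Hypothetical usage sketch (not part of the original module): the
        # hook decorators registered above attach methods to pipeline stages;
        # the stage name maps to a 'pre_<stage>' or 'post_<stage>' hook:
        #
        #   class MyTest(rfm.RegressionTest):
        #       @run_before('run')
        #       def set_options(self):
        #           self.executable_opts += ['--fast']
        #
        #       @run_after('setup')
        #       def set_num_tasks(self):
        #           self.num_tasks = 4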
        # Machinery to add a sanity function
        def sanity_function(fn):
            '''Mark a function as the test's sanity function.

            Decorated functions must be unary and they will be converted into
            deferred expressions.
            '''
            _def_fn = deferrable(fn)
            setattr(_def_fn, '_rfm_sanity_fn', True)
            return _def_fn

        namespace['sanity_function'] = sanity_function
        namespace['deferrable'] = deferrable
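
        # Hypothetical usage sketch (not part of the original module; `sn` is
        # assumed to be `reframe.utility.sanity`): marking the test's sanity
        # check, which is turned into a deferred expression:
        #
        #   class MyTest(rfm.RunOnlyRegressionTest):
        #       @sanity_function
        #       def validate(self):
        #           return sn.assert_found(r'PASSED', self.stdout)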
        # Machinery to add performance functions
        def performance_function(units, *, perf_key=None):
            '''Decorate a function to extract a performance variable.

            The ``units`` argument indicates the units of the performance
            variable to be extracted.
            The ``perf_key`` optional arg will be used as the name of the
            performance variable. If not provided, the function name will
            be used as the performance variable name.
            '''
            if not isinstance(units, str):
                raise TypeError('performance units must be a string')

            if perf_key and not isinstance(perf_key, str):
                raise TypeError("'perf_key' must be a string")

            def _deco_wrapper(func):
                if not utils.is_trivially_callable(func, non_def_args=1):
                    raise TypeError(
                        f'performance function {func.__name__!r} has more '
                        f'than one argument without a default value'
                    )

                @functools.wraps(func)
                def _perf_fn(*args, **kwargs):
                    return _DeferredPerformanceExpression(
                        func, units, *args, **kwargs
                    )

                _perf_key = perf_key if perf_key else func.__name__
                setattr(_perf_fn, '_rfm_perf_key', _perf_key)
                return _perf_fn

            return _deco_wrapper

        namespace['performance_function'] = performance_function
        namespace['_rfm_perf_fns'] = namespaces.LocalNamespace()
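
        # Hypothetical usage sketch (not part of the original module; `sn` is
        # assumed to be `reframe.utility.sanity`): declaring performance
        # variables, optionally under a custom name via `perf_key`:
        #
        #   class MyTest(rfm.RunOnlyRegressionTest):
        #       @performance_function('GB/s')
        #       def bandwidth(self):
        #           return sn.extractsingle(r'BW:\s+(\S+)', self.stdout,
        #                                   1, float)
        #
        #       @performance_function('s', perf_key='elapsed')
        #       def elapsed_time(self):
        #           return sn.extractsingle(r'Time:\s+(\S+)', self.stdout,
        #                                   1, float)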
        return metacls.MetaNamespace(namespace)

    def __new__(metacls, name, bases, namespace, **kwargs):
        '''Remove directives from the class namespace.

        It does not make sense to have some directives available after the
        class was created or even at the instance level (e.g. doing
        ``self.parameter([1, 2, 3])`` does not make sense). So here, we
        intercept those directives out of the namespace before the class is
        constructed.
        '''
        directives = [
            'parameter', 'variable', 'bind', 'run_before', 'run_after',
            'require_deps', 'required', 'deferrable', 'sanity_function',
            'final', 'performance_function'
        ]
        for b in directives:
            namespace.pop(b, None)

        # Reset the external functions imported through the bind directive.
        for item in namespace.pop('_rfm_ext_bound'):
            namespace.reset(item)

        return super().__new__(metacls, name, bases, dict(namespace), **kwargs)

    def __init__(cls, name, bases, namespace, **kwargs):
        super().__init__(name, bases, namespace, **kwargs)

        # Create a set with the attribute names already in use.
        cls._rfm_dir = set()
        for base in (b for b in bases if hasattr(b, '_rfm_dir')):
            cls._rfm_dir.update(base._rfm_dir)

        used_attribute_names = set(cls._rfm_dir)

        # Build the var space and extend the target namespace
        variables.VarSpace(cls, used_attribute_names)
        used_attribute_names.update(cls._rfm_var_space.vars)

        # Build the parameter space
        parameters.ParamSpace(cls, used_attribute_names)

        # Update used names set with the local __dict__
        cls._rfm_dir.update(cls.__dict__)

        # Update the hook registry with the bases
        for base in cls._rfm_bases:
            cls._rfm_hook_registry.update(
                base._rfm_hook_registry, denied_hooks=namespace
            )

        # Search the bases if no local sanity functions exist.
        if '_rfm_sanity' not in namespace:
            for base in cls._rfm_bases:
                if hasattr(base, '_rfm_sanity'):
                    cls._rfm_sanity = getattr(base, '_rfm_sanity')
                    if cls._rfm_sanity.__name__ in namespace:
                        raise ReframeSyntaxError(
                            f'{cls.__qualname__!r} overrides the candidate '
                            f'sanity function '
                            f'{cls._rfm_sanity.__qualname__!r} without '
                            f'defining an alternative'
                        )

                    break

        # Update the performance function dict with the bases.
        for base in cls._rfm_bases:
            for k, v in base._rfm_perf_fns.items():
                if k not in namespace:
                    try:
                        cls._rfm_perf_fns[k] = v
                    except KeyError:
                        '''Performance function overridden by other class'''

        # Add the final functions from its parents
        cls._rfm_final_methods.update(
            *(b._rfm_final_methods for b in cls._rfm_bases)
        )

        if getattr(cls, '_rfm_override_final', None):
            return

        for b in cls._rfm_bases:
            for key in b._rfm_final_methods:
                if key in namespace and callable(namespace[key]):
                    msg = (f"'{cls.__qualname__}.{key}' attempts to "
                           f"override final method "
                           f"'{b.__qualname__}.{key}'; "
                           f"you should use the pipeline hooks instead")
                    raise ReframeSyntaxError(msg)

    def __call__(cls, *args, **kwargs):
        '''Inject parameter and variable spaces during object construction.

        When a class is instantiated, this method intercepts the arguments
        associated to the parameter and variable spaces. This prevents both
        :func:`__new__` and :func:`__init__` methods from ever seeing these
        arguments.

        The parameter and variable spaces are injected into the object after
        construction and before initialization.
        '''
        # Intercept constructor arguments
        _rfm_use_params = kwargs.pop('_rfm_use_params', False)

        obj = cls.__new__(cls, *args, **kwargs)

        # Insert the var & param spaces
        cls._rfm_var_space.inject(obj, cls)
        cls._rfm_param_space.inject(obj, cls, _rfm_use_params)

        obj.__init__(*args, **kwargs)
        return obj
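
    # Illustrative sketch (not part of the original module): for a test class
    # `MyTest` built with this metaclass, `MyTest()` therefore runs roughly
    # the following sequence:
    #
    #   obj = MyTest.__new__(MyTest)
    #   MyTest._rfm_var_space.inject(obj, MyTest)
    #   MyTest._rfm_param_space.inject(obj, MyTest, False)
    #   obj.__init__()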
    def __getattribute__(cls, name):
        '''Attribute lookup method for custom class attributes.

        ReFrame test variables are descriptors injected at the class level.
        If a variable descriptor has already been injected into the class,
        do not return the descriptor object and return the default value
        associated with that variable instead.

        .. warning::
           .. versionchanged:: 3.7.0
              Prior versions exposed the variable descriptor object if this
              was already present in the class, instead of returning the
              variable's default value.
        '''
        try:
            var_space = super().__getattribute__('_rfm_var_space')
        except AttributeError:
            var_space = None

        # If the variable is already injected, delegate lookup to __getattr__.
        if var_space and name in var_space.injected_vars:
            raise AttributeError('delegate variable lookup to __getattr__')

        # Default back to the base method if no special treatment required.
        return super().__getattribute__(name)

    def __getattr__(cls, name):
        '''Backup attribute lookup method into custom namespaces.

        Some ReFrame built-in types are stored under their own sub-namespaces.
        This method will perform an attribute lookup on these sub-namespaces
        if a call to the default :func:`__getattribute__` method fails to
        retrieve the requested class attribute.
        '''
        try:
            var_space = super().__getattribute__('_rfm_var_space')
            return var_space.vars[name]
        except AttributeError:
            '''Catch early access attempt to the variable space.'''
        except KeyError:
            '''Requested name not in variable space.'''

        try:
            param_space = super().__getattribute__('_rfm_param_space')
            return param_space.params[name]
        except AttributeError:
            '''Catch early access attempt to the parameter space.'''
        except KeyError:
            '''Requested name not in parameter space.'''

        raise AttributeError(
            f'class {cls.__qualname__!r} has no attribute {name!r}'
        ) from None

    def setvar(cls, name, value):
        '''Set the value of a variable.

        :param name: The name of the variable.
        :param value: The value of the variable.
        :returns: :class:`True` if the variable was set.

        A variable will *not* be set, if it does not exist or when an
        attempt is made to set it with its underlying descriptor.
        This happens during the variable injection time and it should be
        delegated to the class' :func:`__setattr__` method.

        :raises ReframeSyntaxError: If an attempt is made to override a
            variable with a descriptor other than its underlying one.
        '''
        try:
            var_space = super().__getattribute__('_rfm_var_space')
            if name in var_space:
                if not hasattr(value, '__get__'):
                    var_space[name].define(value)
                    return True
                elif var_space[name].field is not value:
                    desc = '.'.join([cls.__qualname__, name])
                    raise ReframeSyntaxError(
                        f'cannot override variable descriptor {desc!r}'
                    )
                else:
                    # Variable is being injected
                    return False
        except AttributeError:
            '''Catch early access attempt to the variable space.'''

        return False
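
    # Illustrative note (not part of the original module): `setvar` is what
    # backs class-level assignments such as `MyTest.some_var = 10`, which
    # update a variable's default value; assigning a foreign descriptor raises
    # ReframeSyntaxError, and names that are not variables fall through to the
    # regular class-attribute path in `__setattr__` below.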
    def __setattr__(cls, name, value):
        '''Handle the special treatment required for variables and parameters.

        A variable's default value can be updated when accessed as a regular
        class attribute. This behavior does not apply when the assigned value
        is a descriptor object. In that case, the task of setting the value is
        delegated to the base :func:`__setattr__` (this is to comply with
        standard Python behavior). However, since the variables are already
        descriptors which are injected during class instantiation, we disallow
        any attempt to override this descriptor (since it would be silently
        re-overridden in any case).

        Altering the value of a parameter when accessed as a class attribute
        is not allowed. This would break the parameter space internals.
        '''
        # Try to treat `name` as variable
        if cls.setvar(name, value):
            return

        # Try to treat `name` as a parameter
        try:
            # Catch attempts to override a test parameter
            param_space = super().__getattribute__('_rfm_param_space')
            if name in param_space.params:
                raise ReframeSyntaxError(f'cannot override parameter {name!r}')
        except AttributeError:
            '''Catch early access attempt to the parameter space.'''

        # Treat `name` as normal class attribute
        super().__setattr__(name, value)

    @property
    def param_space(cls):
        '''Make the parameter space available as read-only.'''
        return cls._rfm_param_space

    def is_abstract(cls):
        '''Check if the class is an abstract test.

        This is the case when some parameters are undefined, which results in
        the length of the parameter space being 0.

        :return: bool indicating whether the test has undefined parameters.

        :meta private:
        '''
        return len(cls.param_space) == 0
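
    # Illustrative sketch (not part of the original module): a test that
    # declares a parameter without values has an empty parameter space and is
    # therefore abstract:
    #
    #   class AbstractTest(rfm.RegressionTest):
    #       p = parameter()            # undefined parameter
    #
    #   AbstractTest.is_abstract()     # -> True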
| 39.432479 | 79 | 0.588304 | ["BSD-3-Clause"] | ChristopherBignamini/reframe | reframe/core/meta.py | 23,068 | Python |